Commit 0f6d7890 by Enrico Pozzobon

Merge branch 'email-submissions'

parents 4eeaf6aa b974ca1d
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "aead-common.h"
int aead_check_tag
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned size)
{
/* Set "accum" to -1 if the tags match, or 0 if they don't match */
int accum = 0;
while (size > 0) {
accum |= (*tag1++ ^ *tag2++);
--size;
}
accum = (accum - 1) >> 8;
/* Destroy the plaintext if the tag match failed */
while (plaintext_len > 0) {
*plaintext++ &= accum;
--plaintext_len;
}
/* If "accum" is 0, return -1, otherwise return 0 */
return ~accum;
}
int aead_check_tag_precheck
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned size, int precheck)
{
/* Set "accum" to -1 if the tags match, or 0 if they don't match */
int accum = 0;
while (size > 0) {
accum |= (*tag1++ ^ *tag2++);
--size;
}
accum = ((accum - 1) >> 8) & precheck;
/* Destroy the plaintext if the tag match failed */
while (plaintext_len > 0) {
*plaintext++ &= accum;
--plaintext_len;
}
/* If "accum" is 0, return -1, otherwise return 0 */
return ~accum;
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LWCRYPTO_AEAD_COMMON_H
#define LWCRYPTO_AEAD_COMMON_H
#include <stddef.h>
/**
* \file aead-common.h
* \brief Definitions that are common across AEAD schemes.
*
* AEAD stands for "Authenticated Encryption with Associated Data".
* It is a standard API pattern for securely encrypting and
* authenticating packets of data.
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief Encrypts and authenticates a packet with an AEAD scheme.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - normally not used by AEAD schemes.
* \param npub Points to the public nonce for the packet.
* \param k Points to the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*/
typedef int (*aead_cipher_encrypt_t)
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with an AEAD scheme.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - normally not used by AEAD schemes.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet.
* \param k Points to the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*/
typedef int (*aead_cipher_decrypt_t)
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Hashes a block of input data.
*
* \param out Buffer to receive the hash output.
* \param in Points to the input data to be hashed.
* \param inlen Length of the input data in bytes.
*
* \return Returns zero on success or -1 if there was an error in the
* parameters.
*/
typedef int (*aead_hash_t)
(unsigned char *out, const unsigned char *in, unsigned long long inlen);
/**
* \brief Initializes the state for a hashing operation.
*
* \param state Hash state to be initialized.
*/
typedef void (*aead_hash_init_t)(void *state);
/**
* \brief Updates a hash state with more input data.
*
* \param state Hash state to be updated.
* \param in Points to the input data to be incorporated into the state.
* \param inlen Length of the input data to be incorporated into the state.
*/
typedef void (*aead_hash_update_t)
(void *state, const unsigned char *in, unsigned long long inlen);
/**
* \brief Returns the final hash value from a hashing operation.
*
* \param state Hash state to be finalized.
* \param out Points to the output buffer to receive the hash value.
*/
typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out);
/**
* \brief Absorbs more input data into an XOF state.
*
* \param state XOF state to be updated.
* \param in Points to the input data to be absorbed into the state.
* \param inlen Length of the input data to be absorbed into the state.
*
* \sa ascon_xof_init(), ascon_xof_squeeze()
*/
typedef void (*aead_xof_absorb_t)
(void *state, const unsigned char *in, unsigned long long inlen);
/**
* \brief Squeezes output data from an XOF state.
*
* \param state XOF state to squeeze the output data from.
* \param out Points to the output buffer to receive the squeezed data.
* \param outlen Number of bytes of data to squeeze out of the state.
*/
typedef void (*aead_xof_squeeze_t)
(void *state, unsigned char *out, unsigned long long outlen);
/**
* \brief No special AEAD features.
*/
#define AEAD_FLAG_NONE 0x0000
/**
* \brief The natural byte order of the AEAD cipher is little-endian.
*
* If this flag is not present, then the natural byte order of the
* AEAD cipher should be assumed to be big-endian.
*
* The natural byte order may be useful when formatting packet sequence
* numbers as nonces. The application needs to know whether the sequence
* number should be packed into the leading or trailing bytes of the nonce.
*/
#define AEAD_FLAG_LITTLE_ENDIAN 0x0001
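/*
 * Illustrative sketch (not part of the library): packing a 64-bit packet
 * sequence number into a nonce according to the cipher's natural byte
 * order, normally taken from the "flags" field of aead_cipher_t below.
 * The helper name "aead_format_sequence_nonce" is hypothetical.
 */
static void aead_format_sequence_nonce
    (unsigned char *nonce, unsigned nonce_len,
     unsigned long long seq, unsigned flags)
{
    unsigned i;
    for (i = 0; i < nonce_len; ++i)
        nonce[i] = 0;
    if (flags & AEAD_FLAG_LITTLE_ENDIAN) {
        /* Little-endian ciphers take the counter in the leading bytes */
        for (i = 0; i < 8 && i < nonce_len; ++i)
            nonce[i] = (unsigned char)(seq >> (8 * i));
    } else {
        /* Big-endian ciphers take the counter in the trailing bytes */
        for (i = 0; i < 8 && i < nonce_len; ++i)
            nonce[nonce_len - 1 - i] = (unsigned char)(seq >> (8 * i));
    }
}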
/**
* \brief Meta-information about an AEAD cipher.
*/
typedef struct
{
const char *name; /**< Name of the cipher */
unsigned key_len; /**< Length of the key in bytes */
unsigned nonce_len; /**< Length of the nonce in bytes */
unsigned tag_len; /**< Length of the tag in bytes */
unsigned flags; /**< Flags for extra features */
aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */
aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */
} aead_cipher_t;
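/*
 * Illustrative sketch (not part of the library): encrypting a packet
 * through the generic meta-information structure.  The helper name is
 * hypothetical; the caller is assumed to have checked that "key" and
 * "nonce" are cipher->key_len and cipher->nonce_len bytes long and that
 * "c" has room for mlen + cipher->tag_len bytes of output.
 */
static int aead_encrypt_packet
    (const aead_cipher_t *cipher,
     unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *nonce, const unsigned char *key)
{
    /* AEAD schemes in this API do not use the secret nonce "nsec",
     * and this example passes no associated data */
    return (*(cipher->encrypt))(c, clen, m, mlen, 0, 0, 0, nonce, key);
}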
/**
* \brief Meta-information about a hash algorithm that is related to an AEAD.
*
* Regular hash algorithms should provide the "hash", "init", "update",
* and "finalize" functions. Extensible Output Functions (XOF's) should
* proivde the "hash", "init", "absorb", and "squeeze" functions.
*/
typedef struct
{
const char *name; /**< Name of the hash algorithm */
size_t state_size; /**< Size of the incremental state structure */
unsigned hash_len; /**< Length of the hash in bytes */
unsigned flags; /**< Flags for extra features */
aead_hash_t hash; /**< All in one hashing function */
aead_hash_init_t init; /**< Incremental hash/XOF init function */
aead_hash_update_t update; /**< Incremental hash update function */
aead_hash_finalize_t finalize; /**< Incremental hash finalize function */
aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */
aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */
} aead_hash_algorithm_t;
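/*
 * Illustrative sketch (not part of the library): incremental hashing of a
 * two-part message through the function pointers above.  The helper name
 * is hypothetical; "state" must point to at least alg->state_size bytes
 * and "out" to at least alg->hash_len bytes.
 */
static void aead_hash_two_parts
    (const aead_hash_algorithm_t *alg, void *state, unsigned char *out,
     const unsigned char *part1, unsigned long long len1,
     const unsigned char *part2, unsigned long long len2)
{
    (*(alg->init))(state);
    (*(alg->update))(state, part1, len1);
    (*(alg->update))(state, part2, len2);
    (*(alg->finalize))(state, out);
}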
/**
* \brief Check an authentication tag in constant time.
*
* \param plaintext Points to the plaintext data.
* \param plaintext_len Length of the plaintext in bytes.
* \param tag1 First tag to compare.
* \param tag2 Second tag to compare.
* \param tag_len Length of the tags in bytes.
*
* \return Returns -1 if the tag check failed or 0 if the check succeeded.
*
* If the tag check fails, then the \a plaintext will also be zeroed to
* prevent it from being used accidentally by the application when the
* ciphertext was invalid.
*/
int aead_check_tag
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned tag_len);
/**
* \brief Check an authentication tag in constant time with a previous check.
*
* \param plaintext Points to the plaintext data.
* \param plaintext_len Length of the plaintext in bytes.
* \param tag1 First tag to compare.
* \param tag2 Second tag to compare.
* \param tag_len Length of the tags in bytes.
* \param precheck Set to -1 if previous check succeeded or 0 if it failed.
*
* \return Returns -1 if the tag check failed or 0 if the check succeeded.
*
* If the tag check fails, then the \a plaintext will also be zeroed to
* prevent it from being used accidentally by the application when the
* ciphertext was invalid.
*
* This version can be used to incorporate other information about the
* correctness of the plaintext into the final result.
*/
int aead_check_tag_precheck
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned tag_len, int precheck);
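/*
 * Illustrative sketch (not part of the library): the tail of a typical
 * AEAD decryption routine.  The helper name is hypothetical; the caller
 * is assumed to have already decrypted the message into "m" and
 * recomputed the tag into "computed_tag", with the received tag stored
 * in the last tag_len bytes of "c".
 */
static int aead_finish_decrypt
    (unsigned char *m, unsigned long long *mlen,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *computed_tag, unsigned tag_len)
{
    /* Report the plaintext length without the trailing tag */
    *mlen = clen - tag_len;

    /* Compare the tags in constant time; on mismatch the plaintext in
     * "m" is zeroed and -1 is returned, otherwise 0 is returned */
    return aead_check_tag(m, *mlen, computed_tag, c + *mlen, tag_len);
}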
#ifdef __cplusplus
}
#endif
#endif
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
#include "romulus.h"
int crypto_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
return romulus_m1_aead_encrypt
(c, clen, m, mlen, ad, adlen, nsec, npub, k);
}
int crypto_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
return romulus_m1_aead_decrypt
(m, mlen, nsec, c, clen, ad, adlen, npub, k);
}
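/*
 * Illustrative sketch (not part of the submission): encrypting and then
 * decrypting one packet through the NIST-style wrapper above.  Buffer
 * sizes follow the CRYPTO_* constants: 16-byte key, 16-byte nonce, and a
 * 16-byte tag appended to the ciphertext.  The function name and values
 * are placeholders.
 */
static int romulus_m1_roundtrip_example(void)
{
    unsigned char key[CRYPTO_KEYBYTES] = {0};
    unsigned char nonce[CRYPTO_NPUBBYTES] = {0};
    unsigned char msg[16] = "example payload";
    unsigned char ct[sizeof(msg) + CRYPTO_ABYTES];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (crypto_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                            0, 0, 0, nonce, key) != 0)
        return -1;

    /* Returns 0 and recovers the plaintext only if the tag is valid */
    return crypto_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                               0, 0, nonce, key);
}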
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal-skinny128.h"
#include "internal-skinnyutil.h"
#include "internal-util.h"
#include <string.h>
#if !defined(__AVR__)
STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk)
{
/* This function is used to fast-forward the TK1 tweak value
* to the value at the end of the key schedule for decryption.
*
* The tweak permutation repeats every 16 rounds, so SKINNY-128-256
* with 48 rounds does not need any fast forwarding applied.
* SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds
* are equivalent to applying the permutation 8 times:
*
* PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12]
*/
uint32_t row0 = tk[0];
uint32_t row1 = tk[1];
uint32_t row2 = tk[2];
uint32_t row3 = tk[3];
tk[0] = ((row1 >> 8) & 0x0000FFFFU) |
((row0 >> 8) & 0x00FF0000U) |
((row0 << 8) & 0xFF000000U);
tk[1] = ((row1 >> 24) & 0x000000FFU) |
((row0 << 8) & 0x00FFFF00U) |
((row1 << 24) & 0xFF000000U);
tk[2] = ((row3 >> 8) & 0x0000FFFFU) |
((row2 >> 8) & 0x00FF0000U) |
((row2 << 8) & 0xFF000000U);
tk[3] = ((row3 >> 24) & 0x000000FFU) |
((row2 << 8) & 0x00FFFF00U) |
((row3 << 24) & 0xFF000000U);
}
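/*
 * Illustrative check (not part of the library): skinny128_fast_forward_tk()
 * above is intended to match eight applications of skinny128_permute_tk(),
 * as the PT*8 comment describes.  The helper name is hypothetical and only
 * meant for host-side testing.
 */
static int skinny128_fast_forward_tk_matches(const uint32_t tk_in[4])
{
    uint32_t a[4], b[4];
    unsigned i;
    for (i = 0; i < 4; ++i)
        a[i] = b[i] = tk_in[i];
    skinny128_fast_forward_tk(a);
    for (i = 0; i < 8; ++i)
        skinny128_permute_tk(b);
    return a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] == b[3];
}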
void skinny_128_384_init
(skinny_128_384_key_schedule_t *ks, const unsigned char key[48])
{
#if !SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t TK3[4];
uint32_t *schedule;
unsigned round;
uint8_t rc;
#endif
#if SKINNY_128_SMALL_SCHEDULE
/* Copy the input key as-is when using the small key schedule version */
memcpy(ks->TK1, key, sizeof(ks->TK1));
memcpy(ks->TK2, key + 16, sizeof(ks->TK2));
memcpy(ks->TK3, key + 32, sizeof(ks->TK3));
#else
/* Set the initial states of TK1, TK2, and TK3 */
memcpy(ks->TK1, key, 16);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
TK3[0] = le_load_word32(key + 32);
TK3[1] = le_load_word32(key + 36);
TK3[2] = le_load_word32(key + 40);
TK3[3] = le_load_word32(key + 44);
/* Set up the key schedule using TK2 and TK3. TK1 is not added
* to the key schedule because we will derive that part of the
* schedule during encryption operations */
schedule = ks->k;
rc = 0;
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) {
/* XOR the round constants with the current schedule words.
* The round constants for the 3rd and 4th rows are
* fixed and will be applied during encryption. */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F);
schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4);
/* Permute TK2 and TK3 for the next round */
skinny128_permute_tk(TK2);
skinny128_permute_tk(TK3);
/* Apply the LFSR's to TK2 and TK3 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
}
#endif
}
void skinny_128_384_encrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t TK3[4];
uint8_t rc = 0;
#else
const uint32_t *schedule = ks->k;
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
TK3[0] = le_load_word32(ks->TK3);
TK3[1] = le_load_word32(ks->TK3 + 4);
TK3[2] = le_load_word32(ks->TK3 + 8);
TK3[3] = le_load_word32(ks->TK3 + 12);
#endif
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
#endif
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 for the next round */
skinny128_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_permute_tk(TK2);
skinny128_permute_tk(TK3);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
#else
schedule += 2;
#endif
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_384_decrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t TK3[4];
uint8_t rc = 0x15;
#else
const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]);
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state, TK1 */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
TK3[0] = le_load_word32(ks->TK3);
TK3[1] = le_load_word32(ks->TK3 + 4);
TK3[2] = le_load_word32(ks->TK3 + 8);
TK3[3] = le_load_word32(ks->TK3 + 12);
#endif
/* Permute TK1 to fast-forward it to the end of the key schedule */
skinny128_fast_forward_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_fast_forward_tk(TK2);
skinny128_fast_forward_tk(TK3);
for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) {
/* Also fast-forward the LFSR's on every byte of TK2 and TK3 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR2(TK2[2]);
skinny128_LFSR2(TK2[3]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
skinny128_LFSR3(TK3[2]);
skinny128_LFSR3(TK3[3]);
}
#endif
/* Perform all decryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Inverse permutation on TK1 for this round */
skinny128_inv_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_inv_permute_tk(TK2);
skinny128_inv_permute_tk(TK3);
skinny128_LFSR3(TK2[2]);
skinny128_LFSR3(TK2[3]);
skinny128_LFSR2(TK3[2]);
skinny128_LFSR2(TK3[3]);
#endif
/* Inverse mix of the columns */
temp = s3;
s3 = s0;
s0 = s1;
s1 = s2;
s3 ^= temp;
s2 = temp ^ s0;
s1 ^= s2;
/* Inverse shift of the rows */
s1 = leftRotate24(s1);
s2 = leftRotate16(s2);
s3 = leftRotate8(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
schedule -= 2;
#endif
s2 ^= 0x02;
/* Apply the inverse of the S-box to all bytes in the state */
skinny128_inv_sbox(s0);
skinny128_inv_sbox(s1);
skinny128_inv_sbox(s2);
skinny128_inv_sbox(s3);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_384_encrypt_tk2
(skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input, const unsigned char *tk2)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
uint32_t TK2[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK3[4];
uint8_t rc = 0;
#else
const uint32_t *schedule = ks->k;
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
TK2[0] = le_load_word32(tk2);
TK2[1] = le_load_word32(tk2 + 4);
TK2[2] = le_load_word32(tk2 + 8);
TK2[3] = le_load_word32(tk2 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK3[0] = le_load_word32(ks->TK3);
TK3[1] = le_load_word32(ks->TK3 + 4);
TK3[2] = le_load_word32(ks->TK3 + 8);
TK3[3] = le_load_word32(ks->TK3 + 12);
#endif
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0] ^ TK2[0];
s1 ^= schedule[1] ^ TK1[1] ^ TK2[1];
#endif
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 and TK2 for the next round */
skinny128_permute_tk(TK1);
skinny128_permute_tk(TK2);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_permute_tk(TK3);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
#else
schedule += 2;
#endif
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_384_encrypt_tk_full
(const unsigned char key[48], unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
uint32_t TK2[4];
uint32_t TK3[4];
uint32_t temp;
unsigned round;
uint8_t rc = 0;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakey */
TK1[0] = le_load_word32(key);
TK1[1] = le_load_word32(key + 4);
TK1[2] = le_load_word32(key + 8);
TK1[3] = le_load_word32(key + 12);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
TK3[0] = le_load_word32(key + 32);
TK3[1] = le_load_word32(key + 36);
TK3[2] = le_load_word32(key + 40);
TK3[3] = le_load_word32(key + 44);
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* XOR the round constant and the subkey for this round */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1, TK2, and TK3 for the next round */
skinny128_permute_tk(TK1);
skinny128_permute_tk(TK2);
skinny128_permute_tk(TK3);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_256_init
(skinny_128_256_key_schedule_t *ks, const unsigned char key[32])
{
#if !SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t *schedule;
unsigned round;
uint8_t rc;
#endif
#if SKINNY_128_SMALL_SCHEDULE
/* Copy the input key as-is when using the small key schedule version */
memcpy(ks->TK1, key, sizeof(ks->TK1));
memcpy(ks->TK2, key + 16, sizeof(ks->TK2));
#else
/* Set the initial states of TK1 and TK2 */
memcpy(ks->TK1, key, 16);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
/* Set up the key schedule using TK2. TK1 is not added
* to the key schedule because we will derive that part of the
* schedule during encryption operations */
schedule = ks->k;
rc = 0;
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) {
/* XOR the round constants with the current schedule words.
* The round constants for the 3rd and 4th rows are
* fixed and will be applied during encryption. */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
schedule[0] = TK2[0] ^ (rc & 0x0F);
schedule[1] = TK2[1] ^ (rc >> 4);
/* Permute TK2 for the next round */
skinny128_permute_tk(TK2);
/* Apply the LFSR to TK2 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
}
#endif
}
void skinny_128_256_encrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint8_t rc = 0;
#else
const uint32_t *schedule = ks->k;
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state, TK1 */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
#endif
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* XOR the round constant and the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
#endif
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 and TK2 for the next round */
skinny128_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_permute_tk(TK2);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
#else
schedule += 2;
#endif
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_256_decrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint8_t rc = 0x09;
#else
const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]);
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state, TK1.
* There is no need to fast-forward TK1 because the value at
* the end of the key schedule is the same as at the start */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) {
/* Also fast-forward the LFSR's on every byte of TK2 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR2(TK2[2]);
skinny128_LFSR2(TK2[3]);
}
#endif
/* Perform all decryption rounds */
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
/* Inverse permutation on TK1 for this round */
skinny128_inv_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_inv_permute_tk(TK2);
skinny128_LFSR3(TK2[2]);
skinny128_LFSR3(TK2[3]);
#endif
/* Inverse mix of the columns */
temp = s3;
s3 = s0;
s0 = s1;
s1 = s2;
s3 ^= temp;
s2 = temp ^ s0;
s1 ^= s2;
/* Inverse shift of the rows */
s1 = leftRotate24(s1);
s2 = leftRotate16(s2);
s3 = leftRotate8(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
schedule -= 2;
#endif
s2 ^= 0x02;
/* Apply the inverse of the S-box to all bytes in the state */
skinny128_inv_sbox(s0);
skinny128_inv_sbox(s1);
skinny128_inv_sbox(s2);
skinny128_inv_sbox(s3);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_256_encrypt_tk_full
(const unsigned char key[32], unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
uint32_t TK2[4];
uint32_t temp;
unsigned round;
uint8_t rc = 0;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakey */
TK1[0] = le_load_word32(key);
TK1[1] = le_load_word32(key + 4);
TK1[2] = le_load_word32(key + 8);
TK1[3] = le_load_word32(key + 12);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* XOR the round constant and the subkey for this round */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4);
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 and TK2 for the next round */
skinny128_permute_tk(TK1);
skinny128_permute_tk(TK2);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
#else /* __AVR__ */
void skinny_128_384_encrypt_tk2
(skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input, const unsigned char *tk2)
{
memcpy(ks->TK2, tk2, 16);
skinny_128_384_encrypt(ks, output, input);
}
#endif /* __AVR__ */
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_SKINNY128_H
#define LW_INTERNAL_SKINNY128_H
/**
* \file internal-skinny128.h
* \brief SKINNY-128 block cipher family.
*
* References: https://eprint.iacr.org/2016/660.pdf,
* https://sites.google.com/site/skinnycipher/
*/
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* \def SKINNY_128_SMALL_SCHEDULE
* \brief Defined to 1 to use the small key schedule version of SKINNY-128.
*/
#if defined(__AVR__)
#define SKINNY_128_SMALL_SCHEDULE 1
#else
#define SKINNY_128_SMALL_SCHEDULE 0
#endif
/**
* \brief Size of a block for SKINNY-128 block ciphers.
*/
#define SKINNY_128_BLOCK_SIZE 16
/**
* \brief Number of rounds for SKINNY-128-384.
*/
#define SKINNY_128_384_ROUNDS 56
/**
* \brief Structure of the key schedule for SKINNY-128-384.
*/
typedef struct
{
/** TK1 for the tweakable part of the key schedule */
uint8_t TK1[16];
#if SKINNY_128_SMALL_SCHEDULE
/** TK2 for the small key schedule */
uint8_t TK2[16];
/** TK3 for the small key schedule */
uint8_t TK3[16];
#else
/** Words of the full key schedule */
uint32_t k[SKINNY_128_384_ROUNDS * 2];
#endif
} skinny_128_384_key_schedule_t;
/**
* \brief Initializes the key schedule for SKINNY-128-384.
*
* \param ks Points to the key schedule to initialize.
* \param key Points to the key data.
*/
void skinny_128_384_init
(skinny_128_384_key_schedule_t *ks, const unsigned char key[48]);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-384.
*
* \param ks Points to the SKINNY-128-384 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*/
void skinny_128_384_encrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Decrypts a 128-bit block with SKINNY-128-384.
*
* \param ks Points to the SKINNY-128-384 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*/
void skinny_128_384_decrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly
* provided TK2 value.
*
* \param ks Points to the SKINNY-128-384 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
* \param tk2 TK2 value that should be updated on the fly.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* This version is useful when both TK1 and TK2 change from block to block.
* When the key is initialized with skinny_128_384_init(), the TK2 part of
* the key value should be set to zero.
*
* \note Some versions of this function may modify the key schedule to
* copy tk2 into place.
*/
void skinny_128_384_encrypt_tk2
(skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input, const unsigned char *tk2);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-384 and a
* fully specified tweakey value.
*
* \param key Points to the 384-bit tweakey value.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* This version is useful when the entire tweakey changes from block to
* block. It is slower than the other versions of SKINNY-128-384 but
* more memory-efficient.
*/
void skinny_128_384_encrypt_tk_full
(const unsigned char key[48], unsigned char *output,
const unsigned char *input);
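/*
 * Illustrative sketch (not part of the library): basic use of the
 * SKINNY-128-384 block API with a fixed 48-byte tweakey (TK1 || TK2 || TK3).
 * The helper name and the all-zero values are placeholders.
 */
static int skinny_128_384_roundtrip_example(void)
{
    skinny_128_384_key_schedule_t ks;
    unsigned char key[48] = {0};
    unsigned char block[16] = {0};
    unsigned char ct[16], pt[16];
    unsigned i;

    skinny_128_384_init(&ks, key);
    skinny_128_384_encrypt(&ks, ct, block);
    skinny_128_384_decrypt(&ks, pt, ct);

    /* Decrypting the ciphertext should recover the original block */
    for (i = 0; i < 16; ++i) {
        if (pt[i] != block[i])
            return -1;
    }
    return 0;
}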
/**
* \brief Number of rounds for SKINNY-128-256.
*/
#define SKINNY_128_256_ROUNDS 48
/**
* \brief Structure of the key schedule for SKINNY-128-256.
*/
typedef struct
{
/** TK1 for the tweakable part of the key schedule */
uint8_t TK1[16];
#if SKINNY_128_SMALL_SCHEDULE
/** TK2 for the small key schedule */
uint8_t TK2[16];
#else
/** Words of the full key schedule */
uint32_t k[SKINNY_128_256_ROUNDS * 2];
#endif
} skinny_128_256_key_schedule_t;
/**
* \brief Initializes the key schedule for SKINNY-128-256.
*
* \param ks Points to the key schedule to initialize.
* \param key Points to the key data.
*/
void skinny_128_256_init
(skinny_128_256_key_schedule_t *ks, const unsigned char key[32]);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-256.
*
* \param ks Points to the SKINNY-128-256 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*/
void skinny_128_256_encrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Decrypts a 128-bit block with SKINNY-128-256.
*
* \param ks Points to the SKINNY-128-256 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*/
void skinny_128_256_decrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-256 and a
* fully specified tweakey value.
*
* \param key Points to the 256-bit tweakey value.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* This version is useful when the entire tweakey changes from block to
* block. It is slower than the other versions of SKINNY-128-256 but
* more memory-efficient.
*/
void skinny_128_256_encrypt_tk_full
(const unsigned char key[32], unsigned char *output,
const unsigned char *input);
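/*
 * Illustrative sketch (not part of the library): one-shot encryption with
 * skinny_128_256_encrypt_tk_full() when the whole 256-bit tweakey changes
 * from block to block, so no key schedule is built.  The helper name is
 * hypothetical.
 */
static void skinny_128_256_one_shot_example
    (unsigned char out[16], const unsigned char in[16],
     const unsigned char tweakey[32])
{
    skinny_128_256_encrypt_tk_full(tweakey, out, in);
}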
#ifdef __cplusplus
}
#endif
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_SKINNYUTIL_H
#define LW_INTERNAL_SKINNYUTIL_H
/**
* \file internal-skinnyutil.h
* \brief Utilities to help implement SKINNY and its variants.
*/
#include "internal-util.h"
#ifdef __cplusplus
extern "C" {
#endif
/** @cond skinnyutil */
/* Utilities for implementing SKINNY-128 */
#define skinny128_LFSR2(x) \
do { \
uint32_t _x = (x); \
(x) = ((_x << 1) & 0xFEFEFEFEU) ^ \
(((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \
} while (0)
#define skinny128_LFSR3(x) \
do { \
uint32_t _x = (x); \
(x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \
(((_x << 7) ^ (_x << 1)) & 0x80808080U); \
} while (0)
/* LFSR2 and LFSR3 are inverses of each other */
#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x)
#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x)
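/*
 * Illustrative check (not part of the library): LFSR2 followed by its
 * inverse (LFSR3) should return a word to its original value.  The helper
 * name is hypothetical and only intended for host-side testing.
 */
static int skinny128_lfsr2_roundtrip(uint32_t word)
{
    uint32_t x = word;
    skinny128_LFSR2(x);
    skinny128_inv_LFSR2(x); /* expands to skinny128_LFSR3(x) */
    return x == word;
}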
#define skinny128_permute_tk(tk) \
do { \
/* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \
uint32_t row2 = tk[2]; \
uint32_t row3 = tk[3]; \
tk[2] = tk[0]; \
tk[3] = tk[1]; \
row3 = (row3 << 16) | (row3 >> 16); \
tk[0] = ((row2 >> 8) & 0x000000FFU) | \
((row2 << 16) & 0x00FF0000U) | \
( row3 & 0xFF00FF00U); \
tk[1] = ((row2 >> 16) & 0x000000FFU) | \
(row2 & 0xFF000000U) | \
((row3 << 8) & 0x0000FF00U) | \
( row3 & 0x00FF0000U); \
} while (0)
#define skinny128_inv_permute_tk(tk) \
do { \
/* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \
uint32_t row0 = tk[0]; \
uint32_t row1 = tk[1]; \
tk[0] = tk[2]; \
tk[1] = tk[3]; \
tk[2] = ((row0 >> 16) & 0x000000FFU) | \
((row0 << 8) & 0x0000FF00U) | \
((row1 << 16) & 0x00FF0000U) | \
( row1 & 0xFF000000U); \
tk[3] = ((row0 >> 16) & 0x0000FF00U) | \
((row0 << 16) & 0xFF000000U) | \
((row1 >> 16) & 0x000000FFU) | \
((row1 << 8) & 0x00FF0000U); \
} while (0)
/*
* Apply the SKINNY sbox. The original version from the specification is
* equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
* #define SBOX_SWAP(x)
* (((x) & 0xF9F9F9F9U) |
* (((x) >> 1) & 0x02020202U) |
* (((x) << 1) & 0x04040404U))
* #define SBOX_PERMUTE(x)
* ((((x) & 0x01010101U) << 2) |
* (((x) & 0x06060606U) << 5) |
* (((x) & 0x20202020U) >> 5) |
* (((x) & 0xC8C8C8C8U) >> 2) |
* (((x) & 0x10101010U) >> 1))
*
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE(x);
* x = SBOX_MIX(x);
* return SBOX_SWAP(x);
*
* However, we can mix the bits in their original positions and then
* delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one
* final permutation. This reduces the number of shift operations.
*/
#define skinny128_sbox(x) \
do { \
uint32_t y; \
\
/* Mix the bits */ \
x = ~x; \
x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \
y = (((x << 5) & (x << 1)) & 0x20202020U); \
x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \
y = (((x << 2) & (x << 1)) & 0x80808080U); \
x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \
y = (((x >> 5) & (x << 1)) & 0x04040404U); \
x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \
x = ~x; \
\
/* Permutation generated by http://programming.sirrida.de/calcperm.php */ \
/* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \
x = ((x & 0x08080808U) << 1) | \
((x & 0x32323232U) << 2) | \
((x & 0x01010101U) << 5) | \
((x & 0x80808080U) >> 6) | \
((x & 0x40404040U) >> 4) | \
((x & 0x04040404U) >> 2); \
} while (0)
/*
* Apply the inverse of the SKINNY sbox. The original version from the
* specification is equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
* #define SBOX_SWAP(x)
* (((x) & 0xF9F9F9F9U) |
* (((x) >> 1) & 0x02020202U) |
* (((x) << 1) & 0x04040404U))
* #define SBOX_PERMUTE_INV(x)
* ((((x) & 0x08080808U) << 1) |
* (((x) & 0x32323232U) << 2) |
* (((x) & 0x01010101U) << 5) |
* (((x) & 0xC0C0C0C0U) >> 5) |
* (((x) & 0x04040404U) >> 2))
*
* x = SBOX_SWAP(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE_INV(x);
* return SBOX_MIX(x);
*
* However, we can mix the bits in their original positions and then
* delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one
* final permutation. This reduces the number of shift operations.
*/
#define skinny128_inv_sbox(x) \
do { \
uint32_t y; \
\
/* Mix the bits */ \
x = ~x; \
y = (((x >> 1) & (x >> 3)) & 0x01010101U); \
x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \
y = (((x >> 6) & (x >> 1)) & 0x02020202U); \
x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \
y = (((x << 2) & (x << 1)) & 0x80808080U); \
x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \
y = (((x << 5) & (x << 1)) & 0x20202020U); \
x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \
x = ~x; \
\
/* Permutation generated by http://programming.sirrida.de/calcperm.php */ \
/* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \
x = ((x & 0x01010101U) << 2) | \
((x & 0x04040404U) << 4) | \
((x & 0x02020202U) << 6) | \
((x & 0x20202020U) >> 5) | \
((x & 0xC8C8C8C8U) >> 2) | \
((x & 0x10101010U) >> 1); \
} while (0)
/* Utilities for implementing SKINNY-64 */
#define skinny64_LFSR2(x) \
do { \
uint16_t _x = (x); \
(x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \
} while (0)
#define skinny64_LFSR3(x) \
do { \
uint16_t _x = (x); \
(x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \
} while (0)
/* LFSR2 and LFSR3 are inverses of each other */
#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x)
#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x)
#define skinny64_permute_tk(tk) \
do { \
/* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \
uint16_t row2 = tk[2]; \
uint16_t row3 = tk[3]; \
tk[2] = tk[0]; \
tk[3] = tk[1]; \
row3 = (row3 << 8) | (row3 >> 8); \
tk[0] = ((row2 << 4) & 0xF000U) | \
((row2 >> 8) & 0x00F0U) | \
( row3 & 0x0F0FU); \
tk[1] = ((row2 << 8) & 0xF000U) | \
((row3 >> 4) & 0x0F00U) | \
( row3 & 0x00F0U) | \
( row2 & 0x000FU); \
} while (0)
#define skinny64_inv_permute_tk(tk) \
do { \
/* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \
uint16_t row0 = tk[0]; \
uint16_t row1 = tk[1]; \
tk[0] = tk[2]; \
tk[1] = tk[3]; \
tk[2] = ((row0 << 8) & 0xF000U) | \
((row0 >> 4) & 0x0F00U) | \
((row1 >> 8) & 0x00F0U) | \
( row1 & 0x000FU); \
tk[3] = ((row1 << 8) & 0xF000U) | \
((row0 << 8) & 0x0F00U) | \
((row1 >> 4) & 0x00F0U) | \
((row0 >> 8) & 0x000FU); \
} while (0)
/*
* Apply the SKINNY-64 sbox. The original version from the
* specification is equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x))
* #define SBOX_SHIFT(x)
* ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U))
*
* x = SBOX_MIX(x);
* x = SBOX_SHIFT(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT(x);
* return SBOX_MIX(x);
*
* However, we can mix the bits in their original positions and then
* delay the SBOX_SHIFT steps to be performed with one final rotation.
* This reduces the number of required shift operations from 14 to 10.
*
* We can further reduce the number of NOT operations from 4 to 2
* using the technique from https://github.com/kste/skinny_avx to
* convert NOR-XOR operations into AND-XOR operations by converting
* the S-box into its NOT-inverse.
*/
#define skinny64_sbox(x) \
do { \
x = ~x; \
x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \
x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \
x = ~x; \
x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \
} while (0)
/*
* Apply the inverse of the SKINNY-64 sbox. The original version
* from the specification is equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x))
* #define SBOX_SHIFT_INV(x)
* ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U))
*
* x = SBOX_MIX(x);
* x = SBOX_SHIFT_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT_INV(x);
* return SBOX_MIX(x);
*/
#define skinny64_inv_sbox(x) \
do { \
x = ~x; \
x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \
x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \
x = ~x; \
x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \
} while (0)
/** @endcond */
#ifdef __cplusplus
}
#endif
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_UTIL_H
#define LW_INTERNAL_UTIL_H
#include <stdint.h>
/* Figure out how to inline functions using this C compiler */
#if defined(__STDC__) && __STDC_VERSION__ >= 199901L
#define STATIC_INLINE static inline
#elif defined(__GNUC__) || defined(__clang__)
#define STATIC_INLINE static __inline__
#else
#define STATIC_INLINE static
#endif
/* Try to figure out whether the CPU is little-endian or big-endian.
* May need to modify this to include new compiler-specific defines.
* Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your
* compiler flags when you compile this library */
#if defined(__x86_64) || defined(__x86_64__) || \
defined(__i386) || defined(__i386__) || \
defined(__AVR__) || defined(__arm) || defined(__arm__) || \
defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \
defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \
(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \
defined(__LITTLE_ENDIAN__)
#define LW_UTIL_LITTLE_ENDIAN 1
#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \
defined(__BIG_ENDIAN__)
/* Big endian */
#else
#error "Cannot determine the endianess of this platform"
#endif
/* Helper macros to load and store values while converting endian-ness */
/* Load a big-endian 32-bit word from a byte buffer */
#define be_load_word32(ptr) \
((((uint32_t)((ptr)[0])) << 24) | \
(((uint32_t)((ptr)[1])) << 16) | \
(((uint32_t)((ptr)[2])) << 8) | \
((uint32_t)((ptr)[3])))
/* Store a big-endian 32-bit word into a byte buffer */
#define be_store_word32(ptr, x) \
do { \
uint32_t _x = (x); \
(ptr)[0] = (uint8_t)(_x >> 24); \
(ptr)[1] = (uint8_t)(_x >> 16); \
(ptr)[2] = (uint8_t)(_x >> 8); \
(ptr)[3] = (uint8_t)_x; \
} while (0)
/* Load a little-endian 32-bit word from a byte buffer */
#define le_load_word32(ptr) \
((((uint32_t)((ptr)[3])) << 24) | \
(((uint32_t)((ptr)[2])) << 16) | \
(((uint32_t)((ptr)[1])) << 8) | \
((uint32_t)((ptr)[0])))
/* Store a little-endian 32-bit word into a byte buffer */
#define le_store_word32(ptr, x) \
do { \
uint32_t _x = (x); \
(ptr)[0] = (uint8_t)_x; \
(ptr)[1] = (uint8_t)(_x >> 8); \
(ptr)[2] = (uint8_t)(_x >> 16); \
(ptr)[3] = (uint8_t)(_x >> 24); \
} while (0)
/* Load a big-endian 64-bit word from a byte buffer */
#define be_load_word64(ptr) \
((((uint64_t)((ptr)[0])) << 56) | \
(((uint64_t)((ptr)[1])) << 48) | \
(((uint64_t)((ptr)[2])) << 40) | \
(((uint64_t)((ptr)[3])) << 32) | \
(((uint64_t)((ptr)[4])) << 24) | \
(((uint64_t)((ptr)[5])) << 16) | \
(((uint64_t)((ptr)[6])) << 8) | \
((uint64_t)((ptr)[7])))
/* Store a big-endian 64-bit word into a byte buffer */
#define be_store_word64(ptr, x) \
do { \
uint64_t _x = (x); \
(ptr)[0] = (uint8_t)(_x >> 56); \
(ptr)[1] = (uint8_t)(_x >> 48); \
(ptr)[2] = (uint8_t)(_x >> 40); \
(ptr)[3] = (uint8_t)(_x >> 32); \
(ptr)[4] = (uint8_t)(_x >> 24); \
(ptr)[5] = (uint8_t)(_x >> 16); \
(ptr)[6] = (uint8_t)(_x >> 8); \
(ptr)[7] = (uint8_t)_x; \
} while (0)
/* Load a little-endian 64-bit word from a byte buffer */
#define le_load_word64(ptr) \
((((uint64_t)((ptr)[7])) << 56) | \
(((uint64_t)((ptr)[6])) << 48) | \
(((uint64_t)((ptr)[5])) << 40) | \
(((uint64_t)((ptr)[4])) << 32) | \
(((uint64_t)((ptr)[3])) << 24) | \
(((uint64_t)((ptr)[2])) << 16) | \
(((uint64_t)((ptr)[1])) << 8) | \
((uint64_t)((ptr)[0])))
/* Store a little-endian 64-bit word into a byte buffer */
#define le_store_word64(ptr, x) \
do { \
uint64_t _x = (x); \
(ptr)[0] = (uint8_t)_x; \
(ptr)[1] = (uint8_t)(_x >> 8); \
(ptr)[2] = (uint8_t)(_x >> 16); \
(ptr)[3] = (uint8_t)(_x >> 24); \
(ptr)[4] = (uint8_t)(_x >> 32); \
(ptr)[5] = (uint8_t)(_x >> 40); \
(ptr)[6] = (uint8_t)(_x >> 48); \
(ptr)[7] = (uint8_t)(_x >> 56); \
} while (0)
/* Load a big-endian 16-bit word from a byte buffer */
#define be_load_word16(ptr) \
((((uint16_t)((ptr)[0])) << 8) | \
((uint16_t)((ptr)[1])))
/* Store a big-endian 16-bit word into a byte buffer */
#define be_store_word16(ptr, x) \
do { \
uint16_t _x = (x); \
(ptr)[0] = (uint8_t)(_x >> 8); \
(ptr)[1] = (uint8_t)_x; \
} while (0)
/* Load a little-endian 16-bit word from a byte buffer */
#define le_load_word16(ptr) \
((((uint16_t)((ptr)[1])) << 8) | \
((uint16_t)((ptr)[0])))
/* Store a little-endian 16-bit word into a byte buffer */
#define le_store_word16(ptr, x) \
do { \
uint16_t _x = (x); \
(ptr)[0] = (uint8_t)_x; \
(ptr)[1] = (uint8_t)(_x >> 8); \
} while (0)
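/*
 * Illustrative sketch (not part of the library): the load/store macros
 * above round-trip a value regardless of the host byte order.  The helper
 * name is hypothetical.
 */
static int le_word32_roundtrip(uint32_t value)
{
    uint8_t buf[4];
    le_store_word32(buf, value);
    return le_load_word32(buf) == value;
}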
/* XOR a source byte buffer against a destination */
#define lw_xor_block(dest, src, len) \
do { \
unsigned char *_dest = (dest); \
const unsigned char *_src = (src); \
unsigned _len = (len); \
while (_len > 0) { \
*_dest++ ^= *_src++; \
--_len; \
} \
} while (0)
/* XOR two source byte buffers and put the result in a destination buffer */
#define lw_xor_block_2_src(dest, src1, src2, len) \
do { \
unsigned char *_dest = (dest); \
const unsigned char *_src1 = (src1); \
const unsigned char *_src2 = (src2); \
unsigned _len = (len); \
while (_len > 0) { \
*_dest++ = *_src1++ ^ *_src2++; \
--_len; \
} \
} while (0)
/* XOR a source byte buffer against a destination and write to another
* destination at the same time */
#define lw_xor_block_2_dest(dest2, dest, src, len) \
do { \
unsigned char *_dest2 = (dest2); \
unsigned char *_dest = (dest); \
const unsigned char *_src = (src); \
unsigned _len = (len); \
while (_len > 0) { \
*_dest2++ = (*_dest++ ^= *_src++); \
--_len; \
} \
} while (0)
/* XOR two source byte buffers and write the result to a destination,
 * while at the same time copying the contents of src2 to dest2 */
#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \
do { \
unsigned char *_dest2 = (dest2); \
unsigned char *_dest = (dest); \
const unsigned char *_src1 = (src1); \
const unsigned char *_src2 = (src2); \
unsigned _len = (len); \
while (_len > 0) { \
unsigned char _temp = *_src2++; \
*_dest2++ = _temp; \
*_dest++ = *_src1++ ^ _temp; \
--_len; \
} \
} while (0)
/* XOR a source byte buffer against a destination and write to another
* destination at the same time. This version swaps the source value
* into the "dest" buffer */
#define lw_xor_block_swap(dest2, dest, src, len) \
do { \
unsigned char *_dest2 = (dest2); \
unsigned char *_dest = (dest); \
const unsigned char *_src = (src); \
unsigned _len = (len); \
while (_len > 0) { \
unsigned char _temp = *_src++; \
*_dest2++ = *_dest ^ _temp; \
*_dest++ = _temp; \
--_len; \
} \
} while (0)
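/* Usage sketch (illustrative, with hypothetical buffer names): given three
 * 16-byte buffers "state", "block" and "out",
 *
 *     lw_xor_block(state, block, 16);             state ^= block
 *     lw_xor_block_2_src(out, state, block, 16);  out = state ^ block
 *     lw_xor_block_2_dest(out, state, block, 16); state ^= block, out = state
 */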
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
#define leftRotate(a, bits) \
(__extension__ ({ \
uint32_t _temp = (a); \
(_temp << (bits)) | (_temp >> (32 - (bits))); \
}))
/* Generic right rotate */
#define rightRotate(a, bits) \
(__extension__ ({ \
uint32_t _temp = (a); \
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
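/* Note: like most C rotation idioms, the generic macros above are only
 * defined for rotation counts in the range 1..31; a count of 0 or 32 would
 * shift a uint32_t by 32 bits, which is undefined behaviour in C.  The same
 * restriction applies to the 8-, 16- and 64-bit variants later in this
 * header.  The fixed-count macros below only ever use counts in range. */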
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
#define leftRotate2(a) (leftRotate((a), 2))
#define leftRotate3(a) (leftRotate((a), 3))
#define leftRotate4(a) (leftRotate((a), 4))
#define leftRotate5(a) (leftRotate((a), 5))
#define leftRotate6(a) (leftRotate((a), 6))
#define leftRotate7(a) (leftRotate((a), 7))
#define leftRotate8(a) (leftRotate((a), 8))
#define leftRotate9(a) (leftRotate((a), 9))
#define leftRotate10(a) (leftRotate((a), 10))
#define leftRotate11(a) (leftRotate((a), 11))
#define leftRotate12(a) (leftRotate((a), 12))
#define leftRotate13(a) (leftRotate((a), 13))
#define leftRotate14(a) (leftRotate((a), 14))
#define leftRotate15(a) (leftRotate((a), 15))
#define leftRotate16(a) (leftRotate((a), 16))
#define leftRotate17(a) (leftRotate((a), 17))
#define leftRotate18(a) (leftRotate((a), 18))
#define leftRotate19(a) (leftRotate((a), 19))
#define leftRotate20(a) (leftRotate((a), 20))
#define leftRotate21(a) (leftRotate((a), 21))
#define leftRotate22(a) (leftRotate((a), 22))
#define leftRotate23(a) (leftRotate((a), 23))
#define leftRotate24(a) (leftRotate((a), 24))
#define leftRotate25(a) (leftRotate((a), 25))
#define leftRotate26(a) (leftRotate((a), 26))
#define leftRotate27(a) (leftRotate((a), 27))
#define leftRotate28(a) (leftRotate((a), 28))
#define leftRotate29(a) (leftRotate((a), 29))
#define leftRotate30(a) (leftRotate((a), 30))
#define leftRotate31(a) (leftRotate((a), 31))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1(a) (rightRotate((a), 1))
#define rightRotate2(a) (rightRotate((a), 2))
#define rightRotate3(a) (rightRotate((a), 3))
#define rightRotate4(a) (rightRotate((a), 4))
#define rightRotate5(a) (rightRotate((a), 5))
#define rightRotate6(a) (rightRotate((a), 6))
#define rightRotate7(a) (rightRotate((a), 7))
#define rightRotate8(a) (rightRotate((a), 8))
#define rightRotate9(a) (rightRotate((a), 9))
#define rightRotate10(a) (rightRotate((a), 10))
#define rightRotate11(a) (rightRotate((a), 11))
#define rightRotate12(a) (rightRotate((a), 12))
#define rightRotate13(a) (rightRotate((a), 13))
#define rightRotate14(a) (rightRotate((a), 14))
#define rightRotate15(a) (rightRotate((a), 15))
#define rightRotate16(a) (rightRotate((a), 16))
#define rightRotate17(a) (rightRotate((a), 17))
#define rightRotate18(a) (rightRotate((a), 18))
#define rightRotate19(a) (rightRotate((a), 19))
#define rightRotate20(a) (rightRotate((a), 20))
#define rightRotate21(a) (rightRotate((a), 21))
#define rightRotate22(a) (rightRotate((a), 22))
#define rightRotate23(a) (rightRotate((a), 23))
#define rightRotate24(a) (rightRotate((a), 24))
#define rightRotate25(a) (rightRotate((a), 25))
#define rightRotate26(a) (rightRotate((a), 26))
#define rightRotate27(a) (rightRotate((a), 27))
#define rightRotate28(a) (rightRotate((a), 28))
#define rightRotate29(a) (rightRotate((a), 29))
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
#define leftRotate_64(a, bits) \
(__extension__ ({ \
uint64_t _temp = (a); \
(_temp << (bits)) | (_temp >> (64 - (bits))); \
}))
/* Generic right rotate */
#define rightRotate_64(a, bits) \
(__extension__ ({ \
uint64_t _temp = (a); \
(_temp >> (bits)) | (_temp << (64 - (bits))); \
}))
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1_64(a) (leftRotate_64((a), 1))
#define leftRotate2_64(a) (leftRotate_64((a), 2))
#define leftRotate3_64(a) (leftRotate_64((a), 3))
#define leftRotate4_64(a) (leftRotate_64((a), 4))
#define leftRotate5_64(a) (leftRotate_64((a), 5))
#define leftRotate6_64(a) (leftRotate_64((a), 6))
#define leftRotate7_64(a) (leftRotate_64((a), 7))
#define leftRotate8_64(a) (leftRotate_64((a), 8))
#define leftRotate9_64(a) (leftRotate_64((a), 9))
#define leftRotate10_64(a) (leftRotate_64((a), 10))
#define leftRotate11_64(a) (leftRotate_64((a), 11))
#define leftRotate12_64(a) (leftRotate_64((a), 12))
#define leftRotate13_64(a) (leftRotate_64((a), 13))
#define leftRotate14_64(a) (leftRotate_64((a), 14))
#define leftRotate15_64(a) (leftRotate_64((a), 15))
#define leftRotate16_64(a) (leftRotate_64((a), 16))
#define leftRotate17_64(a) (leftRotate_64((a), 17))
#define leftRotate18_64(a) (leftRotate_64((a), 18))
#define leftRotate19_64(a) (leftRotate_64((a), 19))
#define leftRotate20_64(a) (leftRotate_64((a), 20))
#define leftRotate21_64(a) (leftRotate_64((a), 21))
#define leftRotate22_64(a) (leftRotate_64((a), 22))
#define leftRotate23_64(a) (leftRotate_64((a), 23))
#define leftRotate24_64(a) (leftRotate_64((a), 24))
#define leftRotate25_64(a) (leftRotate_64((a), 25))
#define leftRotate26_64(a) (leftRotate_64((a), 26))
#define leftRotate27_64(a) (leftRotate_64((a), 27))
#define leftRotate28_64(a) (leftRotate_64((a), 28))
#define leftRotate29_64(a) (leftRotate_64((a), 29))
#define leftRotate30_64(a) (leftRotate_64((a), 30))
#define leftRotate31_64(a) (leftRotate_64((a), 31))
#define leftRotate32_64(a) (leftRotate_64((a), 32))
#define leftRotate33_64(a) (leftRotate_64((a), 33))
#define leftRotate34_64(a) (leftRotate_64((a), 34))
#define leftRotate35_64(a) (leftRotate_64((a), 35))
#define leftRotate36_64(a) (leftRotate_64((a), 36))
#define leftRotate37_64(a) (leftRotate_64((a), 37))
#define leftRotate38_64(a) (leftRotate_64((a), 38))
#define leftRotate39_64(a) (leftRotate_64((a), 39))
#define leftRotate40_64(a) (leftRotate_64((a), 40))
#define leftRotate41_64(a) (leftRotate_64((a), 41))
#define leftRotate42_64(a) (leftRotate_64((a), 42))
#define leftRotate43_64(a) (leftRotate_64((a), 43))
#define leftRotate44_64(a) (leftRotate_64((a), 44))
#define leftRotate45_64(a) (leftRotate_64((a), 45))
#define leftRotate46_64(a) (leftRotate_64((a), 46))
#define leftRotate47_64(a) (leftRotate_64((a), 47))
#define leftRotate48_64(a) (leftRotate_64((a), 48))
#define leftRotate49_64(a) (leftRotate_64((a), 49))
#define leftRotate50_64(a) (leftRotate_64((a), 50))
#define leftRotate51_64(a) (leftRotate_64((a), 51))
#define leftRotate52_64(a) (leftRotate_64((a), 52))
#define leftRotate53_64(a) (leftRotate_64((a), 53))
#define leftRotate54_64(a) (leftRotate_64((a), 54))
#define leftRotate55_64(a) (leftRotate_64((a), 55))
#define leftRotate56_64(a) (leftRotate_64((a), 56))
#define leftRotate57_64(a) (leftRotate_64((a), 57))
#define leftRotate58_64(a) (leftRotate_64((a), 58))
#define leftRotate59_64(a) (leftRotate_64((a), 59))
#define leftRotate60_64(a) (leftRotate_64((a), 60))
#define leftRotate61_64(a) (leftRotate_64((a), 61))
#define leftRotate62_64(a) (leftRotate_64((a), 62))
#define leftRotate63_64(a) (leftRotate_64((a), 63))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1_64(a) (rightRotate_64((a), 1))
#define rightRotate2_64(a) (rightRotate_64((a), 2))
#define rightRotate3_64(a) (rightRotate_64((a), 3))
#define rightRotate4_64(a) (rightRotate_64((a), 4))
#define rightRotate5_64(a) (rightRotate_64((a), 5))
#define rightRotate6_64(a) (rightRotate_64((a), 6))
#define rightRotate7_64(a) (rightRotate_64((a), 7))
#define rightRotate8_64(a) (rightRotate_64((a), 8))
#define rightRotate9_64(a) (rightRotate_64((a), 9))
#define rightRotate10_64(a) (rightRotate_64((a), 10))
#define rightRotate11_64(a) (rightRotate_64((a), 11))
#define rightRotate12_64(a) (rightRotate_64((a), 12))
#define rightRotate13_64(a) (rightRotate_64((a), 13))
#define rightRotate14_64(a) (rightRotate_64((a), 14))
#define rightRotate15_64(a) (rightRotate_64((a), 15))
#define rightRotate16_64(a) (rightRotate_64((a), 16))
#define rightRotate17_64(a) (rightRotate_64((a), 17))
#define rightRotate18_64(a) (rightRotate_64((a), 18))
#define rightRotate19_64(a) (rightRotate_64((a), 19))
#define rightRotate20_64(a) (rightRotate_64((a), 20))
#define rightRotate21_64(a) (rightRotate_64((a), 21))
#define rightRotate22_64(a) (rightRotate_64((a), 22))
#define rightRotate23_64(a) (rightRotate_64((a), 23))
#define rightRotate24_64(a) (rightRotate_64((a), 24))
#define rightRotate25_64(a) (rightRotate_64((a), 25))
#define rightRotate26_64(a) (rightRotate_64((a), 26))
#define rightRotate27_64(a) (rightRotate_64((a), 27))
#define rightRotate28_64(a) (rightRotate_64((a), 28))
#define rightRotate29_64(a) (rightRotate_64((a), 29))
#define rightRotate30_64(a) (rightRotate_64((a), 30))
#define rightRotate31_64(a) (rightRotate_64((a), 31))
#define rightRotate32_64(a) (rightRotate_64((a), 32))
#define rightRotate33_64(a) (rightRotate_64((a), 33))
#define rightRotate34_64(a) (rightRotate_64((a), 34))
#define rightRotate35_64(a) (rightRotate_64((a), 35))
#define rightRotate36_64(a) (rightRotate_64((a), 36))
#define rightRotate37_64(a) (rightRotate_64((a), 37))
#define rightRotate38_64(a) (rightRotate_64((a), 38))
#define rightRotate39_64(a) (rightRotate_64((a), 39))
#define rightRotate40_64(a) (rightRotate_64((a), 40))
#define rightRotate41_64(a) (rightRotate_64((a), 41))
#define rightRotate42_64(a) (rightRotate_64((a), 42))
#define rightRotate43_64(a) (rightRotate_64((a), 43))
#define rightRotate44_64(a) (rightRotate_64((a), 44))
#define rightRotate45_64(a) (rightRotate_64((a), 45))
#define rightRotate46_64(a) (rightRotate_64((a), 46))
#define rightRotate47_64(a) (rightRotate_64((a), 47))
#define rightRotate48_64(a) (rightRotate_64((a), 48))
#define rightRotate49_64(a) (rightRotate_64((a), 49))
#define rightRotate50_64(a) (rightRotate_64((a), 50))
#define rightRotate51_64(a) (rightRotate_64((a), 51))
#define rightRotate52_64(a) (rightRotate_64((a), 52))
#define rightRotate53_64(a) (rightRotate_64((a), 53))
#define rightRotate54_64(a) (rightRotate_64((a), 54))
#define rightRotate55_64(a) (rightRotate_64((a), 55))
#define rightRotate56_64(a) (rightRotate_64((a), 56))
#define rightRotate57_64(a) (rightRotate_64((a), 57))
#define rightRotate58_64(a) (rightRotate_64((a), 58))
#define rightRotate59_64(a) (rightRotate_64((a), 59))
#define rightRotate60_64(a) (rightRotate_64((a), 60))
#define rightRotate61_64(a) (rightRotate_64((a), 61))
#define rightRotate62_64(a) (rightRotate_64((a), 62))
#define rightRotate63_64(a) (rightRotate_64((a), 63))
/* Rotate a 16-bit value left by a number of bits */
#define leftRotate_16(a, bits) \
(__extension__ ({ \
uint16_t _temp = (a); \
(_temp << (bits)) | (_temp >> (16 - (bits))); \
}))
/* Rotate a 16-bit value right by a number of bits */
#define rightRotate_16(a, bits) \
(__extension__ ({ \
uint16_t _temp = (a); \
(_temp >> (bits)) | (_temp << (16 - (bits))); \
}))
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1_16(a) (leftRotate_16((a), 1))
#define leftRotate2_16(a) (leftRotate_16((a), 2))
#define leftRotate3_16(a) (leftRotate_16((a), 3))
#define leftRotate4_16(a) (leftRotate_16((a), 4))
#define leftRotate5_16(a) (leftRotate_16((a), 5))
#define leftRotate6_16(a) (leftRotate_16((a), 6))
#define leftRotate7_16(a) (leftRotate_16((a), 7))
#define leftRotate8_16(a) (leftRotate_16((a), 8))
#define leftRotate9_16(a) (leftRotate_16((a), 9))
#define leftRotate10_16(a) (leftRotate_16((a), 10))
#define leftRotate11_16(a) (leftRotate_16((a), 11))
#define leftRotate12_16(a) (leftRotate_16((a), 12))
#define leftRotate13_16(a) (leftRotate_16((a), 13))
#define leftRotate14_16(a) (leftRotate_16((a), 14))
#define leftRotate15_16(a) (leftRotate_16((a), 15))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1_16(a) (rightRotate_16((a), 1))
#define rightRotate2_16(a) (rightRotate_16((a), 2))
#define rightRotate3_16(a) (rightRotate_16((a), 3))
#define rightRotate4_16(a) (rightRotate_16((a), 4))
#define rightRotate5_16(a) (rightRotate_16((a), 5))
#define rightRotate6_16(a) (rightRotate_16((a), 6))
#define rightRotate7_16(a) (rightRotate_16((a), 7))
#define rightRotate8_16(a) (rightRotate_16((a), 8))
#define rightRotate9_16(a) (rightRotate_16((a), 9))
#define rightRotate10_16(a) (rightRotate_16((a), 10))
#define rightRotate11_16(a) (rightRotate_16((a), 11))
#define rightRotate12_16(a) (rightRotate_16((a), 12))
#define rightRotate13_16(a) (rightRotate_16((a), 13))
#define rightRotate14_16(a) (rightRotate_16((a), 14))
#define rightRotate15_16(a) (rightRotate_16((a), 15))
/* Rotate an 8-bit value left by a number of bits */
#define leftRotate_8(a, bits) \
(__extension__ ({ \
uint8_t _temp = (a); \
(_temp << (bits)) | (_temp >> (8 - (bits))); \
}))
/* Rotate an 8-bit value right by a number of bits */
#define rightRotate_8(a, bits) \
(__extension__ ({ \
uint8_t _temp = (a); \
(_temp >> (bits)) | (_temp << (8 - (bits))); \
}))
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1_8(a) (leftRotate_8((a), 1))
#define leftRotate2_8(a) (leftRotate_8((a), 2))
#define leftRotate3_8(a) (leftRotate_8((a), 3))
#define leftRotate4_8(a) (leftRotate_8((a), 4))
#define leftRotate5_8(a) (leftRotate_8((a), 5))
#define leftRotate6_8(a) (leftRotate_8((a), 6))
#define leftRotate7_8(a) (leftRotate_8((a), 7))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1_8(a) (rightRotate_8((a), 1))
#define rightRotate2_8(a) (rightRotate_8((a), 2))
#define rightRotate3_8(a) (rightRotate_8((a), 3))
#define rightRotate4_8(a) (rightRotate_8((a), 4))
#define rightRotate5_8(a) (rightRotate_8((a), 5))
#define rightRotate6_8(a) (rightRotate_8((a), 6))
#define rightRotate7_8(a) (rightRotate_8((a), 7))
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "romulus.h"
#include "internal-skinny128.h"
#include "internal-util.h"
#include <string.h>
aead_cipher_t const romulus_n1_cipher = {
"Romulus-N1",
ROMULUS_KEY_SIZE,
ROMULUS1_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_n1_aead_encrypt,
romulus_n1_aead_decrypt
};
aead_cipher_t const romulus_n2_cipher = {
"Romulus-N2",
ROMULUS_KEY_SIZE,
ROMULUS2_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_n2_aead_encrypt,
romulus_n2_aead_decrypt
};
aead_cipher_t const romulus_n3_cipher = {
"Romulus-N3",
ROMULUS_KEY_SIZE,
ROMULUS3_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_n3_aead_encrypt,
romulus_n3_aead_decrypt
};
aead_cipher_t const romulus_m1_cipher = {
"Romulus-M1",
ROMULUS_KEY_SIZE,
ROMULUS1_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_m1_aead_encrypt,
romulus_m1_aead_decrypt
};
aead_cipher_t const romulus_m2_cipher = {
"Romulus-M2",
ROMULUS_KEY_SIZE,
ROMULUS2_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_m2_aead_encrypt,
romulus_m2_aead_decrypt
};
aead_cipher_t const romulus_m3_cipher = {
"Romulus-M3",
ROMULUS_KEY_SIZE,
ROMULUS3_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_m3_aead_encrypt,
romulus_m3_aead_decrypt
};
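/*
 * Illustrative usage sketch.  The prototypes of romulus_*_aead_encrypt and
 * romulus_*_aead_decrypt are declared in romulus.h; the call below assumes
 * they follow the usual NIST LWC convention (ciphertext + tag out, plaintext,
 * associated data, unused nsec, nonce, key) and uses hypothetical buffer
 * names, so treat it as a sketch rather than as documentation:
 *
 *     unsigned char ct[msg_len + ROMULUS_TAG_SIZE];
 *     unsigned long long ct_len;
 *     romulus_n1_aead_encrypt(ct, &ct_len, msg, msg_len,
 *                             ad, ad_len, NULL, nonce, key);
 */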
/**
 * \brief Limit on the number of bytes of message or associated data (128MB).
*
* Romulus-N1 and Romulus-M1 use a 56-bit block counter which allows for
* payloads well into the petabyte range. It is unlikely that an embedded
* device will have that much memory to store a contiguous packet!
*
* Romulus-N2 and Romulus-M2 use a 48-bit block counter but the upper
* 24 bits are difficult to modify in the key schedule. So we only
* update the low 24 bits and leave the high 24 bits fixed.
*
* Romulus-N3 and Romulus-M3 use a 24-bit block counter.
*
* For all algorithms, we limit the block counter to 2^23 so that the block
* counter can never exceed 2^24 - 1.
*/
#define ROMULUS_DATA_LIMIT \
((unsigned long long)((1ULL << 23) * SKINNY_128_BLOCK_SIZE))
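/* Worked figures (illustrative): SKINNY-128 has a 16-byte block, so
 * ROMULUS_DATA_LIMIT is (1ULL << 23) * 16 = 2^27 = 134217728 bytes, the
 * 128MB mentioned above.  Processing a maximal message or associated data
 * stream therefore takes on the order of 2^23 counter updates, keeping the
 * LFSR block counter safely below 2^24 - 1 for every variant. */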
/**
* \brief Initializes the key schedule for Romulus-N1 or Romulus-M1.
*
* \param ks Points to the key schedule to initialize.
* \param k Points to the 16 bytes of the key.
* \param npub Points to the 16 bytes of the nonce. May be NULL
* if the nonce will be updated on the fly.
*/
static void romulus1_init
(skinny_128_384_key_schedule_t *ks,
const unsigned char *k, const unsigned char *npub)
{
unsigned char TK[48];
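    /* TK is the 384-bit tweakey TK1 || TK2 || TK3: bytes 0..6 hold the
     * 56-bit LFSR block counter, byte 7 the domain separation value (set
     * later via romulus1_set_domain), bytes 16..31 the nonce and bytes
     * 32..47 the key. */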
TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */
memset(TK + 1, 0, 15);
if (npub)
memcpy(TK + 16, npub, 16);
else
memset(TK + 16, 0, 16);
memcpy(TK + 32, k, 16);
skinny_128_384_init(ks, TK);
}
/**
* \brief Initializes the key schedule for Romulus-N2 or Romulus-M2.
*
* \param ks Points to the key schedule to initialize.
* \param k Points to the 16 bytes of the key.
* \param npub Points to the 12 bytes of the nonce. May be NULL
* if the nonce will be updated on the fly.
*/
static void romulus2_init
(skinny_128_384_key_schedule_t *ks,
const unsigned char *k, const unsigned char *npub)
{
unsigned char TK[48];
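    /* TK is the 384-bit tweakey TK1 || TK2 || TK3: bytes 0..2 hold the low
     * 24 bits of the LFSR block counter, byte 3 the domain separation value
     * (set later via romulus2_set_domain), bytes 4..15 the nonce, bytes
     * 16..31 the key, and bytes 32..34 the fixed high 24 bits of the
     * counter (see ROMULUS_DATA_LIMIT). */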
TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */
if (npub) {
TK[1] = TK[2] = TK[3] = 0;
memcpy(TK + 4, npub, 12);
} else {
memset(TK + 1, 0, 15);
}
memcpy(TK + 16, k, 16);
TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */
memset(TK + 33, 0, 15);
skinny_128_384_init(ks, TK);
}
/**
* \brief Initializes the key schedule for Romulus-N3 or Romulus-M3.
*
* \param ks Points to the key schedule to initialize.
* \param k Points to the 16 bytes of the key.
* \param npub Points to the 12 bytes of the nonce. May be NULL
* if the nonce will be updated on the fly.
*/
static void romulus3_init
(skinny_128_256_key_schedule_t *ks,
const unsigned char *k, const unsigned char *npub)
{
unsigned char TK[32];
TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */
if (npub) {
TK[1] = TK[2] = TK[3] = 0;
memcpy(TK + 4, npub, 12);
} else {
memset(TK + 1, 0, 15);
}
memcpy(TK + 16, k, 16);
skinny_128_256_init(ks, TK);
}
/**
* \brief Sets the domain separation value for Romulus-N1 and M1.
*
* \param ks The key schedule to set the domain separation value into.
* \param domain The domain separation value.
*/
#define romulus1_set_domain(ks, domain) ((ks)->TK1[7] = (domain))
/**
* \brief Sets the domain separation value for Romulus-N2 and M2.
*
* \param ks The key schedule to set the domain separation value into.
* \param domain The domain separation value.
*/
#define romulus2_set_domain(ks, domain) ((ks)->TK1[3] = (domain))
/**
* \brief Sets the domain separation value for Romulus-N3 and M3.
*
* \param ks The key schedule to set the domain separation value into.
* \param domain The domain separation value.
*/
#define romulus3_set_domain(ks, domain) ((ks)->TK1[3] = (domain))
/**
* \brief Updates the 56-bit LFSR block counter for Romulus-N1 and M1.
*
* \param TK1 Points to the TK1 part of the key schedule containing the LFSR.
*/
STATIC_INLINE void romulus1_update_counter(uint8_t TK1[16])
{
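    /* The counter occupies TK1[0..6], least significant byte first.  Each
     * update shifts the 56-bit value left by one bit; when the bit shifted
     * out of TK1[6] was set, the feedback constant 0x95 is XORed into the
     * low byte (a Galois-style LFSR step). */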
uint8_t mask = (uint8_t)(((int8_t)(TK1[6])) >> 7);
TK1[6] = (TK1[6] << 1) | (TK1[5] >> 7);
TK1[5] = (TK1[5] << 1) | (TK1[4] >> 7);
TK1[4] = (TK1[4] << 1) | (TK1[3] >> 7);
TK1[3] = (TK1[3] << 1) | (TK1[2] >> 7);
TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7);
TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7);
TK1[0] = (TK1[0] << 1) ^ (mask & 0x95);
}
/**
* \brief Updates the 24-bit LFSR block counter for Romulus-N2 or M2.
*
* \param TK1 Points to the TK1 part of the key schedule containing the LFSR.
*
* For Romulus-N2 and Romulus-M2 this will only update the low 24 bits of
* the 48-bit LFSR. The high 24 bits are fixed due to ROMULUS_DATA_LIMIT.
*/
STATIC_INLINE void romulus2_update_counter(uint8_t TK1[16])
{
uint8_t mask = (uint8_t)(((int8_t)(TK1[2])) >> 7);
TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7);
TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7);
TK1[0] = (TK1[0] << 1) ^ (mask & 0x1B);
}
/**
* \brief Updates the 24-bit LFSR block counter for Romulus-N3 or M3.
*
* \param TK1 Points to the TK1 part of the key schedule containing the LFSR.
*/
#define romulus3_update_counter(TK1) romulus2_update_counter((TK1))
/**
 * \brief Process the associated data for Romulus-N1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
*/
static void romulus_n1_process_ad
(skinny_128_384_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen)
{
unsigned char temp;
/* Handle the special case of no associated data */
if (adlen == 0) {
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x1A);
skinny_128_384_encrypt_tk2(ks, S, S, npub);
return;
}
/* Process all double blocks except the last */
romulus1_set_domain(ks, 0x08);
while (adlen > 32) {
romulus1_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
skinny_128_384_encrypt_tk2(ks, S, S, ad + 16);
romulus1_update_counter(ks->TK1);
ad += 32;
adlen -= 32;
}
/* Pad and process the left-over blocks */
romulus1_update_counter(ks->TK1);
temp = (unsigned)adlen;
if (temp == 32) {
/* Left-over complete double block */
lw_xor_block(S, ad, 16);
skinny_128_384_encrypt_tk2(ks, S, S, ad + 16);
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x18);
} else if (temp > 16) {
/* Left-over partial double block */
unsigned char pad[16];
temp -= 16;
lw_xor_block(S, ad, 16);
memcpy(pad, ad + 16, temp);
memset(pad + temp, 0, 15 - temp);
pad[15] = temp;
skinny_128_384_encrypt_tk2(ks, S, S, pad);
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x1A);
} else if (temp == 16) {
/* Left-over complete single block */
lw_xor_block(S, ad, temp);
romulus1_set_domain(ks, 0x18);
} else {
/* Left-over partial single block */
lw_xor_block(S, ad, temp);
S[15] ^= temp;
romulus1_set_domain(ks, 0x1A);
}
skinny_128_384_encrypt_tk2(ks, S, S, npub);
}
/**
 * \brief Process the associated data for Romulus-N2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
*/
static void romulus_n2_process_ad
(skinny_128_384_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen)
{
unsigned char temp;
/* Handle the special case of no associated data */
if (adlen == 0) {
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x5A);
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all double blocks except the last */
romulus2_set_domain(ks, 0x48);
while (adlen > 28) {
romulus2_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
ad += 28;
adlen -= 28;
}
/* Pad and process the left-over blocks */
romulus2_update_counter(ks->TK1);
temp = (unsigned)adlen;
if (temp == 28) {
/* Left-over complete double block */
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x58);
} else if (temp > 16) {
/* Left-over partial double block */
temp -= 16;
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp);
ks->TK1[15] = temp;
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x5A);
} else if (temp == 16) {
/* Left-over complete single block */
lw_xor_block(S, ad, temp);
romulus2_set_domain(ks, 0x58);
} else {
/* Left-over partial single block */
lw_xor_block(S, ad, temp);
S[15] ^= temp;
romulus2_set_domain(ks, 0x5A);
}
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_384_encrypt(ks, S, S);
}
/**
 * \brief Process the associated data for Romulus-N3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
*/
static void romulus_n3_process_ad
(skinny_128_256_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen)
{
unsigned char temp;
/* Handle the special case of no associated data */
if (adlen == 0) {
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x9A);
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_256_encrypt(ks, S, S);
return;
}
/* Process all double blocks except the last */
romulus3_set_domain(ks, 0x88);
while (adlen > 28) {
romulus3_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
ad += 28;
adlen -= 28;
}
/* Pad and process the left-over blocks */
romulus3_update_counter(ks->TK1);
temp = (unsigned)adlen;
if (temp == 28) {
/* Left-over complete double block */
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x98);
} else if (temp > 16) {
/* Left-over partial double block */
temp -= 16;
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp);
ks->TK1[15] = temp;
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x9A);
} else if (temp == 16) {
/* Left-over complete single block */
lw_xor_block(S, ad, temp);
romulus3_set_domain(ks, 0x98);
} else {
/* Left-over partial single block */
lw_xor_block(S, ad, temp);
S[15] ^= temp;
romulus3_set_domain(ks, 0x9A);
}
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_256_encrypt(ks, S, S);
}
/**
* \brief Determine the domain separation value to use on the last
* block of the associated data processing.
*
* \param adlen Length of the associated data in bytes.
* \param mlen Length of the message in bytes.
* \param t Size of the second half of a double block; 12 or 16.
*
* \return The domain separation bits to use to finalize the last block.
*/
static uint8_t romulus_m_final_ad_domain
(unsigned long long adlen, unsigned long long mlen, unsigned t)
{
uint8_t domain = 0;
unsigned split = 16U;
unsigned leftover;
/* Determine which domain bits we need based on the length of the ad */
if (adlen == 0) {
/* No associated data, so only 1 block with padding */
domain ^= 0x02;
split = t;
} else {
/* Even or odd associated data length? */
leftover = (unsigned)(adlen % (16U + t));
if (leftover == 0) {
/* Even with a full double block at the end */
domain ^= 0x08;
} else if (leftover < split) {
/* Odd with a partial single block at the end */
domain ^= 0x02;
split = t;
} else if (leftover > split) {
/* Even with a partial double block at the end */
domain ^= 0x0A;
} else {
/* Odd with a full single block at the end */
split = t;
}
}
/* Determine which domain bits we need based on the length of the message */
if (mlen == 0) {
/* No message, so only 1 block with padding */
domain ^= 0x01;
} else {
/* Even or odd message length? */
leftover = (unsigned)(mlen % (16U + t));
if (leftover == 0) {
/* Even with a full double block at the end */
domain ^= 0x04;
} else if (leftover < split) {
/* Odd with a partial single block at the end */
domain ^= 0x01;
} else if (leftover > split) {
/* Even with a partial double block at the end */
domain ^= 0x05;
}
}
return domain;
}
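/* Worked trace (illustrative): for Romulus-M1 (t = 16) with adlen = 20 and
 * mlen = 40, the associated data leaves 20 % 32 = 20 bytes, a partial double
 * block (20 > 16), so domain ^= 0x0A; the message leaves 40 % 32 = 8 bytes,
 * a partial single block (8 < 16), so domain ^= 0x01, giving 0x0B.  The M1
 * caller below then uses 0x30 ^ 0x0B = 0x3B as the final domain value. */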
/**
 * \brief Process the associated data for Romulus-M1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
* \param m Points to the message plaintext.
* \param mlen Length of the message plaintext.
*/
static void romulus_m1_process_ad
(skinny_128_384_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *m, unsigned long long mlen)
{
unsigned char pad[16];
uint8_t final_domain = 0x30;
unsigned temp;
/* Determine the domain separator to use on the final block */
final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 16);
/* Process all associated data double blocks except the last */
romulus1_set_domain(ks, 0x28);
while (adlen > 32) {
romulus1_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
skinny_128_384_encrypt_tk2(ks, S, S, ad + 16);
romulus1_update_counter(ks->TK1);
ad += 32;
adlen -= 32;
}
/* Process the last associated data double block */
temp = (unsigned)adlen;
if (temp == 32) {
/* Last associated data double block is full */
romulus1_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
skinny_128_384_encrypt_tk2(ks, S, S, ad + 16);
romulus1_update_counter(ks->TK1);
} else if (temp > 16) {
/* Last associated data double block is partial */
temp -= 16;
romulus1_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(pad, ad + 16, temp);
memset(pad + temp, 0, sizeof(pad) - temp - 1);
pad[sizeof(pad) - 1] = (unsigned char)temp;
skinny_128_384_encrypt_tk2(ks, S, S, pad);
romulus1_update_counter(ks->TK1);
} else {
/* Last associated data block is single. Needs to be combined
* with the first block of the message payload */
romulus1_set_domain(ks, 0x2C);
romulus1_update_counter(ks->TK1);
if (temp == 16) {
lw_xor_block(S, ad, 16);
} else {
lw_xor_block(S, ad, temp);
S[15] ^= (unsigned char)temp;
}
if (mlen > 16) {
skinny_128_384_encrypt_tk2(ks, S, S, m);
romulus1_update_counter(ks->TK1);
m += 16;
mlen -= 16;
} else if (mlen == 16) {
skinny_128_384_encrypt_tk2(ks, S, S, m);
m += 16;
mlen -= 16;
} else {
temp = (unsigned)mlen;
memcpy(pad, m, temp);
memset(pad + temp, 0, sizeof(pad) - temp - 1);
pad[sizeof(pad) - 1] = (unsigned char)temp;
skinny_128_384_encrypt_tk2(ks, S, S, pad);
mlen = 0;
}
}
/* Process all message double blocks except the last */
romulus1_set_domain(ks, 0x2C);
while (mlen > 32) {
romulus1_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
skinny_128_384_encrypt_tk2(ks, S, S, m + 16);
romulus1_update_counter(ks->TK1);
m += 32;
mlen -= 32;
}
/* Process the last message double block */
temp = (unsigned)mlen;
if (temp == 32) {
/* Last message double block is full */
romulus1_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
skinny_128_384_encrypt_tk2(ks, S, S, m + 16);
} else if (temp > 16) {
/* Last message double block is partial */
temp -= 16;
romulus1_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(pad, m + 16, temp);
memset(pad + temp, 0, sizeof(pad) - temp - 1);
pad[sizeof(pad) - 1] = (unsigned char)temp;
skinny_128_384_encrypt_tk2(ks, S, S, pad);
} else if (temp == 16) {
/* Last message single block is full */
lw_xor_block(S, m, 16);
} else if (temp > 0) {
/* Last message single block is partial */
lw_xor_block(S, m, temp);
S[15] ^= (unsigned char)temp;
}
/* Process the last partial block */
romulus1_set_domain(ks, final_domain);
romulus1_update_counter(ks->TK1);
skinny_128_384_encrypt_tk2(ks, S, S, npub);
}
/**
 * \brief Process the associated data for Romulus-M2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
* \param m Points to the message plaintext.
* \param mlen Length of the message plaintext.
*/
static void romulus_m2_process_ad
(skinny_128_384_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *m, unsigned long long mlen)
{
uint8_t final_domain = 0x70;
unsigned temp;
/* Determine the domain separator to use on the final block */
final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12);
/* Process all associated data double blocks except the last */
romulus2_set_domain(ks, 0x68);
while (adlen > 28) {
romulus2_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
ad += 28;
adlen -= 28;
}
/* Process the last associated data double block */
temp = (unsigned)adlen;
if (temp == 28) {
/* Last associated data double block is full */
romulus2_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
} else if (temp > 16) {
/* Last associated data double block is partial */
temp -= 16;
romulus2_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
} else {
/* Last associated data block is single. Needs to be combined
* with the first block of the message payload */
romulus2_set_domain(ks, 0x6C);
romulus2_update_counter(ks->TK1);
if (temp == 16) {
lw_xor_block(S, ad, 16);
} else {
lw_xor_block(S, ad, temp);
S[15] ^= (unsigned char)temp;
}
if (mlen > 12) {
memcpy(ks->TK1 + 4, m, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
m += 12;
mlen -= 12;
} else if (mlen == 12) {
memcpy(ks->TK1 + 4, m, 12);
skinny_128_384_encrypt(ks, S, S);
m += 12;
mlen -= 12;
} else {
temp = (unsigned)mlen;
memcpy(ks->TK1 + 4, m, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_384_encrypt(ks, S, S);
mlen = 0;
}
}
/* Process all message double blocks except the last */
romulus2_set_domain(ks, 0x6C);
while (mlen > 28) {
romulus2_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
m += 28;
mlen -= 28;
}
/* Process the last message double block */
temp = (unsigned)mlen;
if (temp == 28) {
/* Last message double block is full */
romulus2_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, 12);
skinny_128_384_encrypt(ks, S, S);
} else if (temp > 16) {
/* Last message double block is partial */
temp -= 16;
romulus2_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_384_encrypt(ks, S, S);
} else if (temp == 16) {
/* Last message single block is full */
lw_xor_block(S, m, 16);
} else if (temp > 0) {
/* Last message single block is partial */
lw_xor_block(S, m, temp);
S[15] ^= (unsigned char)temp;
}
/* Process the last partial block */
romulus2_set_domain(ks, final_domain);
romulus2_update_counter(ks->TK1);
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_384_encrypt(ks, S, S);
}
/**
 * \brief Process the associated data for Romulus-M3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
* \param m Points to the message plaintext.
* \param mlen Length of the message plaintext.
*/
static void romulus_m3_process_ad
(skinny_128_256_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *m, unsigned long long mlen)
{
uint8_t final_domain = 0xB0;
unsigned temp;
/* Determine the domain separator to use on the final block */
final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12);
/* Process all associated data double blocks except the last */
romulus3_set_domain(ks, 0xA8);
while (adlen > 28) {
romulus3_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
ad += 28;
adlen -= 28;
}
/* Process the last associated data double block */
temp = (unsigned)adlen;
if (temp == 28) {
/* Last associated data double block is full */
romulus3_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
} else if (temp > 16) {
/* Last associated data double block is partial */
temp -= 16;
romulus3_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
} else {
/* Last associated data block is single. Needs to be combined
* with the first block of the message payload */
romulus3_set_domain(ks, 0xAC);
romulus3_update_counter(ks->TK1);
if (temp == 16) {
lw_xor_block(S, ad, 16);
} else {
lw_xor_block(S, ad, temp);
S[15] ^= (unsigned char)temp;
}
if (mlen > 12) {
memcpy(ks->TK1 + 4, m, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
m += 12;
mlen -= 12;
} else if (mlen == 12) {
memcpy(ks->TK1 + 4, m, 12);
skinny_128_256_encrypt(ks, S, S);
m += 12;
mlen -= 12;
} else {
temp = (unsigned)mlen;
memcpy(ks->TK1 + 4, m, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_256_encrypt(ks, S, S);
mlen = 0;
}
}
/* Process all message double blocks except the last */
romulus3_set_domain(ks, 0xAC);
while (mlen > 28) {
romulus3_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
m += 28;
mlen -= 28;
}
/* Process the last message double block */
temp = (unsigned)mlen;
if (temp == 28) {
/* Last message double block is full */
romulus3_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, 12);
skinny_128_256_encrypt(ks, S, S);
} else if (temp > 16) {
/* Last message double block is partial */
temp -= 16;
romulus3_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_256_encrypt(ks, S, S);
} else if (temp == 16) {
/* Last message single block is full */
lw_xor_block(S, m, 16);
} else if (temp > 0) {
/* Last message single block is partial */
lw_xor_block(S, m, temp);
S[15] ^= (unsigned char)temp;
}
/* Process the last partial block */
romulus3_set_domain(ks, final_domain);
romulus3_update_counter(ks->TK1);
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_256_encrypt(ks, S, S);
}
/**
* \brief Applies the Romulus rho function.
*
* \param S The rolling Romulus state.
* \param C Ciphertext message output block.
* \param M Plaintext message input block.
*/
STATIC_INLINE void romulus_rho
(unsigned char S[16], unsigned char C[16], const unsigned char M[16])
{
unsigned index;
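    /* Per byte: the new state is S ^ M and the ciphertext is M ^ G(S),
     * where G rotates the previous state byte right by one bit and XORs
     * the byte's original most significant bit into the top bit of the
     * result. */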
for (index = 0; index < 16; ++index) {
unsigned char s = S[index];
unsigned char m = M[index];
S[index] ^= m;
C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
}
}
/**
* \brief Applies the inverse of the Romulus rho function.
*
* \param S The rolling Romulus state.
* \param M Plaintext message output block.
* \param C Ciphertext message input block.
*/
STATIC_INLINE void romulus_rho_inverse
(unsigned char S[16], unsigned char M[16], const unsigned char C[16])
{
unsigned index;
for (index = 0; index < 16; ++index) {
unsigned char s = S[index];
unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
S[index] ^= m;
M[index] = m;
}
}
/**
* \brief Applies the Romulus rho function to a short block.
*
* \param S The rolling Romulus state.
* \param C Ciphertext message output block.
* \param M Plaintext message input block.
* \param len Length of the short block, must be less than 16.
*/
STATIC_INLINE void romulus_rho_short
(unsigned char S[16], unsigned char C[16],
const unsigned char M[16], unsigned len)
{
unsigned index;
for (index = 0; index < len; ++index) {
unsigned char s = S[index];
unsigned char m = M[index];
S[index] ^= m;
C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
}
S[15] ^= (unsigned char)len; /* Padding */
}
/**
* \brief Applies the inverse of the Romulus rho function to a short block.
*
* \param S The rolling Romulus state.
* \param M Plaintext message output block.
* \param C Ciphertext message input block.
* \param len Length of the short block, must be less than 16.
*/
STATIC_INLINE void romulus_rho_inverse_short
(unsigned char S[16], unsigned char M[16],
const unsigned char C[16], unsigned len)
{
unsigned index;
for (index = 0; index < len; ++index) {
unsigned char s = S[index];
unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
S[index] ^= m;
M[index] = m;
}
S[15] ^= (unsigned char)len; /* Padding */
}
/**
* \brief Encrypts a plaintext message with Romulus-N1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n1_encrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no plaintext */
if (mlen == 0) {
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x15);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus1_set_domain(ks, 0x04);
while (mlen > 16) {
romulus_rho(S, c, m);
romulus1_update_counter(ks->TK1);
skinny_128_384_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus1_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_short(S, c, m, temp);
romulus1_set_domain(ks, 0x15);
} else {
romulus_rho(S, c, m);
romulus1_set_domain(ks, 0x14);
}
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Decrypts a ciphertext message with Romulus-N1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n1_decrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no ciphertext */
if (mlen == 0) {
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x15);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus1_set_domain(ks, 0x04);
while (mlen > 16) {
romulus_rho_inverse(S, m, c);
romulus1_update_counter(ks->TK1);
skinny_128_384_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus1_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_inverse_short(S, m, c, temp);
romulus1_set_domain(ks, 0x15);
} else {
romulus_rho_inverse(S, m, c);
romulus1_set_domain(ks, 0x14);
}
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Encrypts a plaintext message with Romulus-N2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n2_encrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no plaintext */
if (mlen == 0) {
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x55);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus2_set_domain(ks, 0x44);
while (mlen > 16) {
romulus_rho(S, c, m);
romulus2_update_counter(ks->TK1);
skinny_128_384_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus2_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_short(S, c, m, temp);
romulus2_set_domain(ks, 0x55);
} else {
romulus_rho(S, c, m);
romulus2_set_domain(ks, 0x54);
}
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Decrypts a ciphertext message with Romulus-N2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n2_decrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no ciphertext */
if (mlen == 0) {
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x55);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus2_set_domain(ks, 0x44);
while (mlen > 16) {
romulus_rho_inverse(S, m, c);
romulus2_update_counter(ks->TK1);
skinny_128_384_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus2_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_inverse_short(S, m, c, temp);
romulus2_set_domain(ks, 0x55);
} else {
romulus_rho_inverse(S, m, c);
romulus2_set_domain(ks, 0x54);
}
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Encrypts a plaintext message with Romulus-N3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n3_encrypt
(skinny_128_256_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no plaintext */
if (mlen == 0) {
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x95);
skinny_128_256_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus3_set_domain(ks, 0x84);
while (mlen > 16) {
romulus_rho(S, c, m);
romulus3_update_counter(ks->TK1);
skinny_128_256_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus3_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_short(S, c, m, temp);
romulus3_set_domain(ks, 0x95);
} else {
romulus_rho(S, c, m);
romulus3_set_domain(ks, 0x94);
}
skinny_128_256_encrypt(ks, S, S);
}
/**
* \brief Decrypts a ciphertext message with Romulus-N3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n3_decrypt
(skinny_128_256_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no ciphertext */
if (mlen == 0) {
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x95);
skinny_128_256_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus3_set_domain(ks, 0x84);
while (mlen > 16) {
romulus_rho_inverse(S, m, c);
romulus3_update_counter(ks->TK1);
skinny_128_256_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus3_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_inverse_short(S, m, c, temp);
romulus3_set_domain(ks, 0x95);
} else {
romulus_rho_inverse(S, m, c);
romulus3_set_domain(ks, 0x94);
}
skinny_128_256_encrypt(ks, S, S);
}
/**
* \brief Encrypts a plaintext message with Romulus-M1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m1_encrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
    /* Process all blocks except the last */
romulus1_set_domain(ks, 0x24);
while (mlen > 16) {
skinny_128_384_encrypt(ks, S, S);
romulus_rho(S, c, m);
romulus1_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_384_encrypt(ks, S, S);
romulus_rho_short(S, c, m, (unsigned)mlen);
}
/**
* \brief Decrypts a ciphertext message with Romulus-M1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m1_decrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
    /* Process all blocks except the last */
romulus1_set_domain(ks, 0x24);
while (mlen > 16) {
skinny_128_384_encrypt(ks, S, S);
romulus_rho_inverse(S, m, c);
romulus1_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_384_encrypt(ks, S, S);
romulus_rho_inverse_short(S, m, c, (unsigned)mlen);
}
/**
* \brief Encrypts a plaintext message with Romulus-M2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m2_encrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
    /* Process all blocks except the last */
romulus2_set_domain(ks, 0x64);
while (mlen > 16) {
skinny_128_384_encrypt(ks, S, S);
romulus_rho(S, c, m);
romulus2_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_384_encrypt(ks, S, S);
romulus_rho_short(S, c, m, (unsigned)mlen);
}
/**
* \brief Decrypts a ciphertext message with Romulus-M2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m2_decrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
    /* Process all blocks except the last */
romulus2_set_domain(ks, 0x64);
while (mlen > 16) {
skinny_128_384_encrypt(ks, S, S);
romulus_rho_inverse(S, m, c);
romulus2_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_384_encrypt(ks, S, S);
romulus_rho_inverse_short(S, m, c, (unsigned)mlen);
}
/**
* \brief Encrypts a plaintext message with Romulus-M3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m3_encrypt
(skinny_128_256_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
    /* Process all blocks except the last */
romulus3_set_domain(ks, 0xA4);
while (mlen > 16) {
skinny_128_256_encrypt(ks, S, S);
romulus_rho(S, c, m);
romulus3_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_256_encrypt(ks, S, S);
romulus_rho_short(S, c, m, (unsigned)mlen);
}
/**
* \brief Decrypts a ciphertext message with Romulus-M3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m3_decrypt
(skinny_128_256_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
    /* Process all blocks except the last */
romulus3_set_domain(ks, 0xA4);
while (mlen > 16) {
skinny_128_256_encrypt(ks, S, S);
romulus_rho_inverse(S, m, c);
romulus3_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_256_encrypt(ks, S, S);
romulus_rho_inverse_short(S, m, c, (unsigned)mlen);
}
/**
* \brief Generates the authentication tag from the rolling Romulus state.
*
* \param T Buffer to receive the generated tag; can be the same as S.
* \param S The rolling Romulus state.
*/
STATIC_INLINE void romulus_generate_tag
(unsigned char T[16], const unsigned char S[16])
{
unsigned index;
for (index = 0; index < 16; ++index) {
unsigned char s = S[index];
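        /* Byte-wise feedback: shift the byte right by one bit and
         * feed the XOR of its old MSB and LSB into the new MSB */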
T[index] = (s >> 1) ^ (s & 0x80) ^ (s << 7);
}
}
int romulus_n1_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus1_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n1_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus1_init(&ks, k, npub);
    /* Encrypt the plaintext to produce the ciphertext */
romulus_n1_encrypt(&ks, S, c, m, mlen);
/* Generate the authentication tag */
romulus_generate_tag(c + mlen, S);
return 0;
}
int romulus_n1_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus1_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n1_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus1_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext */
clen -= ROMULUS_TAG_SIZE;
romulus_n1_decrypt(&ks, S, m, c, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_n2_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus2_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n2_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus2_init(&ks, k, npub);
    /* Encrypt the plaintext to produce the ciphertext */
romulus_n2_encrypt(&ks, S, c, m, mlen);
/* Generate the authentication tag */
romulus_generate_tag(c + mlen, S);
return 0;
}
int romulus_n2_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus2_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n2_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus2_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext */
clen -= ROMULUS_TAG_SIZE;
romulus_n2_decrypt(&ks, S, m, c, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_n3_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus3_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n3_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus3_init(&ks, k, npub);
    /* Encrypt the plaintext to produce the ciphertext */
romulus_n3_encrypt(&ks, S, c, m, mlen);
/* Generate the authentication tag */
romulus_generate_tag(c + mlen, S);
return 0;
}
int romulus_n3_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus3_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n3_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus3_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext */
clen -= ROMULUS_TAG_SIZE;
romulus_n3_decrypt(&ks, S, m, c, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_m1_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus1_init(&ks, k, 0);
/* Process the associated data and the plaintext message */
memset(S, 0, sizeof(S));
romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, mlen);
/* Generate the authentication tag, which is also the initialization
* vector for the encryption portion of the packet processing */
romulus_generate_tag(S, S);
memcpy(c + mlen, S, ROMULUS_TAG_SIZE);
/* Re-initialize the key schedule with the key and nonce */
romulus1_init(&ks, k, npub);
/* Encrypt the plaintext to produce the ciphertext */
romulus_m1_encrypt(&ks, S, c, m, mlen);
return 0;
}
int romulus_m1_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and nonce */
romulus1_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext, using the
* authentication tag as the initialization vector for decryption */
clen -= ROMULUS_TAG_SIZE;
memcpy(S, c + clen, ROMULUS_TAG_SIZE);
romulus_m1_decrypt(&ks, S, m, c, clen);
/* Re-initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus1_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_m2_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus2_init(&ks, k, 0);
/* Process the associated data and the plaintext message */
memset(S, 0, sizeof(S));
romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, mlen);
/* Generate the authentication tag, which is also the initialization
* vector for the encryption portion of the packet processing */
romulus_generate_tag(S, S);
memcpy(c + mlen, S, ROMULUS_TAG_SIZE);
/* Re-initialize the key schedule with the key and nonce */
romulus2_init(&ks, k, npub);
/* Encrypt the plaintext to produce the ciphertext */
romulus_m2_encrypt(&ks, S, c, m, mlen);
return 0;
}
int romulus_m2_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and nonce */
romulus2_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext, using the
* authentication tag as the initialization vector for decryption */
clen -= ROMULUS_TAG_SIZE;
memcpy(S, c + clen, ROMULUS_TAG_SIZE);
romulus_m2_decrypt(&ks, S, m, c, clen);
/* Re-initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus2_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_m3_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
    /* Initialize the key schedule with the key and no nonce. Associated
     * data processing varies the nonce from block to block */
    romulus3_init(&ks, k, 0);
/* Process the associated data and the plaintext message */
memset(S, 0, sizeof(S));
romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, mlen);
/* Generate the authentication tag, which is also the initialization
* vector for the encryption portion of the packet processing */
romulus_generate_tag(S, S);
memcpy(c + mlen, S, ROMULUS_TAG_SIZE);
/* Re-initialize the key schedule with the key and nonce */
romulus3_init(&ks, k, npub);
/* Encrypt the plaintext to produce the ciphertext */
romulus_m3_encrypt(&ks, S, c, m, mlen);
return 0;
}
int romulus_m3_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and nonce */
romulus3_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext, using the
* authentication tag as the initialization vector for decryption */
clen -= ROMULUS_TAG_SIZE;
memcpy(S, c + clen, ROMULUS_TAG_SIZE);
romulus_m3_decrypt(&ks, S, m, c, clen);
/* Re-initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus3_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LWCRYPTO_ROMULUS_H
#define LWCRYPTO_ROMULUS_H
#include "aead-common.h"
/**
* \file romulus.h
* \brief Romulus authenticated encryption algorithm family.
*
* Romulus is a family of authenticated encryption algorithms that
* are built around the SKINNY-128 tweakable block cipher. There
* are six members in the family:
*
* \li Romulus-N1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher. This is the
* primary member of the family.
* \li Romulus-N2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher.
* \li Romulus-N3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-256 tweakable block cipher.
* \li Romulus-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher.
* \li Romulus-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher.
* \li Romulus-M3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-256 tweakable block cipher.
*
* The Romulus-M variants are resistant to nonce reuse as long as the
* combination of the associated data and plaintext is unique. If the
* same associated data and plaintext are reused under the same nonce,
* then the scheme will leak that the same plaintext has been sent for a
* second time but will not reveal the plaintext itself.
*
* References: https://romulusae.github.io/romulus/
*/
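/*
 * Usage sketch for the primary family member, Romulus-N1.  The key,
 * nonce, associated data, and message values below are illustrative
 * placeholders only:
 *
 *     unsigned char key[ROMULUS_KEY_SIZE] = {0};
 *     unsigned char nonce[ROMULUS1_NONCE_SIZE] = {0};
 *     unsigned char ad[4] = {'h', 'd', 'r', '!'};
 *     unsigned char msg[4] = {'t', 'e', 's', 't'};
 *     unsigned char ct[sizeof(msg) + ROMULUS_TAG_SIZE];
 *     unsigned char pt[sizeof(msg)];
 *     unsigned long long ctlen, ptlen;
 *
 *     romulus_n1_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
 *                             ad, sizeof(ad), NULL, nonce, key);
 *     if (romulus_n1_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
 *                                 ad, sizeof(ad), nonce, key) != 0) {
 *         // tag check failed; pt has been zeroed
 *     }
 */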
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief Size of the key for all Romulus family members.
*/
#define ROMULUS_KEY_SIZE 16
/**
* \brief Size of the authentication tag for all Romulus family members.
*/
#define ROMULUS_TAG_SIZE 16
/**
* \brief Size of the nonce for Romulus-N1 and Romulus-M1.
*/
#define ROMULUS1_NONCE_SIZE 16
/**
* \brief Size of the nonce for Romulus-N2 and Romulus-M2.
*/
#define ROMULUS2_NONCE_SIZE 12
/**
* \brief Size of the nonce for Romulus-N3 and Romulus-M3.
*/
#define ROMULUS3_NONCE_SIZE 12
/**
* \brief Meta-information block for the Romulus-N1 cipher.
*/
extern aead_cipher_t const romulus_n1_cipher;
/**
* \brief Meta-information block for the Romulus-N2 cipher.
*/
extern aead_cipher_t const romulus_n2_cipher;
/**
* \brief Meta-information block for the Romulus-N3 cipher.
*/
extern aead_cipher_t const romulus_n3_cipher;
/**
* \brief Meta-information block for the Romulus-M1 cipher.
*/
extern aead_cipher_t const romulus_m1_cipher;
/**
* \brief Meta-information block for the Romulus-M2 cipher.
*/
extern aead_cipher_t const romulus_m2_cipher;
/**
* \brief Meta-information block for the Romulus-M3 cipher.
*/
extern aead_cipher_t const romulus_m3_cipher;
/**
* \brief Encrypts and authenticates a packet with Romulus-N1.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_n1_aead_decrypt()
*/
int romulus_n1_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-N1.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_n1_aead_encrypt()
*/
int romulus_n1_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with Romulus-N2.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_n2_aead_decrypt()
*/
int romulus_n2_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-N2.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_n2_aead_encrypt()
*/
int romulus_n2_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with Romulus-N3.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_n3_aead_decrypt()
*/
int romulus_n3_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-N3.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_n3_aead_encrypt()
*/
int romulus_n3_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with Romulus-M1.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_m1_aead_decrypt()
*/
int romulus_m1_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-M1.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_m1_aead_encrypt()
*/
int romulus_m1_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with Romulus-M2.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_m2_aead_decrypt()
*/
int romulus_m2_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-M2.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_m2_aead_encrypt()
*/
int romulus_m2_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with Romulus-M3.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_m3_aead_decrypt()
*/
int romulus_m3_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-M3.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_m3_aead_encrypt()
*/
int romulus_m3_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
#ifdef __cplusplus
}
#endif
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "aead-common.h"
int aead_check_tag
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned size)
{
/* Set "accum" to -1 if the tags match, or 0 if they don't match */
int accum = 0;
while (size > 0) {
accum |= (*tag1++ ^ *tag2++);
--size;
}
accum = (accum - 1) >> 8;
/* Destroy the plaintext if the tag match failed */
while (plaintext_len > 0) {
*plaintext++ &= accum;
--plaintext_len;
}
/* If "accum" is 0, return -1, otherwise return 0 */
return ~accum;
}
int aead_check_tag_precheck
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned size, int precheck)
{
/* Set "accum" to -1 if the tags match, or 0 if they don't match */
int accum = 0;
while (size > 0) {
accum |= (*tag1++ ^ *tag2++);
--size;
}
accum = ((accum - 1) >> 8) & precheck;
/* Destroy the plaintext if the tag match failed */
while (plaintext_len > 0) {
*plaintext++ &= accum;
--plaintext_len;
}
/* If "accum" is 0, return -1, otherwise return 0 */
return ~accum;
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LWCRYPTO_AEAD_COMMON_H
#define LWCRYPTO_AEAD_COMMON_H
#include <stddef.h>
/**
* \file aead-common.h
* \brief Definitions that are common across AEAD schemes.
*
* AEAD stands for "Authenticated Encryption with Associated Data".
* It is a standard API pattern for securely encrypting and
* authenticating packets of data.
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief Encrypts and authenticates a packet with an AEAD scheme.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - normally not used by AEAD schemes.
* \param npub Points to the public nonce for the packet.
* \param k Points to the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*/
typedef int (*aead_cipher_encrypt_t)
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with an AEAD scheme.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - normally not used by AEAD schemes.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet.
* \param k Points to the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*/
typedef int (*aead_cipher_decrypt_t)
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Hashes a block of input data.
*
* \param out Buffer to receive the hash output.
* \param in Points to the input data to be hashed.
* \param inlen Length of the input data in bytes.
*
* \return Returns zero on success or -1 if there was an error in the
* parameters.
*/
typedef int (*aead_hash_t)
(unsigned char *out, const unsigned char *in, unsigned long long inlen);
/**
* \brief Initializes the state for a hashing operation.
*
* \param state Hash state to be initialized.
*/
typedef void (*aead_hash_init_t)(void *state);
/**
* \brief Updates a hash state with more input data.
*
* \param state Hash state to be updated.
* \param in Points to the input data to be incorporated into the state.
* \param inlen Length of the input data to be incorporated into the state.
*/
typedef void (*aead_hash_update_t)
(void *state, const unsigned char *in, unsigned long long inlen);
/**
* \brief Returns the final hash value from a hashing operation.
*
 * \param state Hash state to be finalized.
* \param out Points to the output buffer to receive the hash value.
*/
typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out);
/**
 * \brief Absorbs more input data into an XOF state.
*
* \param state XOF state to be updated.
* \param in Points to the input data to be absorbed into the state.
* \param inlen Length of the input data to be absorbed into the state.
*
* \sa ascon_xof_init(), ascon_xof_squeeze()
*/
typedef void (*aead_xof_absorb_t)
(void *state, const unsigned char *in, unsigned long long inlen);
/**
* \brief Squeezes output data from an XOF state.
*
* \param state XOF state to squeeze the output data from.
* \param out Points to the output buffer to receive the squeezed data.
* \param outlen Number of bytes of data to squeeze out of the state.
*/
typedef void (*aead_xof_squeeze_t)
(void *state, unsigned char *out, unsigned long long outlen);
/**
* \brief No special AEAD features.
*/
#define AEAD_FLAG_NONE 0x0000
/**
* \brief The natural byte order of the AEAD cipher is little-endian.
*
* If this flag is not present, then the natural byte order of the
* AEAD cipher should be assumed to be big-endian.
*
* The natural byte order may be useful when formatting packet sequence
* numbers as nonces. The application needs to know whether the sequence
* number should be packed into the leading or trailing bytes of the nonce.
*/
#define AEAD_FLAG_LITTLE_ENDIAN 0x0001
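/*
 * Sketch of the formatting decision described above; "flags" and "seq"
 * are hypothetical variables and the 12-byte nonce size is an assumption
 * for illustration:
 *
 *     unsigned char nonce[12] = {0};
 *     unsigned i;
 *     for (i = 0; i < 8; ++i) {
 *         if (flags & AEAD_FLAG_LITTLE_ENDIAN)
 *             nonce[i] = (unsigned char)(seq >> (8 * i));
 *         else
 *             nonce[sizeof(nonce) - 1 - i] = (unsigned char)(seq >> (8 * i));
 *     }
 */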
/**
* \brief Meta-information about an AEAD cipher.
*/
typedef struct
{
const char *name; /**< Name of the cipher */
unsigned key_len; /**< Length of the key in bytes */
unsigned nonce_len; /**< Length of the nonce in bytes */
unsigned tag_len; /**< Length of the tag in bytes */
unsigned flags; /**< Flags for extra features */
aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */
aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */
} aead_cipher_t;
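/*
 * For illustration, a cipher's meta-information block would presumably
 * be populated along the following lines (field values inferred from
 * romulus.h; the actual flags used by each cipher are not shown here):
 *
 *     aead_cipher_t const romulus_n1_cipher = {
 *         "Romulus-N1",
 *         16,                     // key_len
 *         16,                     // nonce_len
 *         16,                     // tag_len
 *         AEAD_FLAG_NONE,         // or AEAD_FLAG_LITTLE_ENDIAN
 *         romulus_n1_aead_encrypt,
 *         romulus_n1_aead_decrypt
 *     };
 */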
/**
* \brief Meta-information about a hash algorithm that is related to an AEAD.
*
* Regular hash algorithms should provide the "hash", "init", "update",
 * and "finalize" functions. Extensible Output Functions (XOFs) should
 * provide the "hash", "init", "absorb", and "squeeze" functions.
*/
typedef struct
{
const char *name; /**< Name of the hash algorithm */
size_t state_size; /**< Size of the incremental state structure */
unsigned hash_len; /**< Length of the hash in bytes */
unsigned flags; /**< Flags for extra features */
aead_hash_t hash; /**< All in one hashing function */
aead_hash_init_t init; /**< Incremental hash/XOF init function */
aead_hash_update_t update; /**< Incremental hash update function */
aead_hash_finalize_t finalize; /**< Incremental hash finalize function */
aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */
aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */
} aead_hash_algorithm_t;
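/*
 * Sketch of driving a hash algorithm through this table; "alg" and the
 * input buffers are hypothetical, and "state" is assumed to point to at
 * least alg->state_size bytes of storage:
 *
 *     unsigned char digest[64];        // assumes alg->hash_len <= 64
 *     alg->init(state);
 *     alg->update(state, part1, part1_len);
 *     alg->update(state, part2, part2_len);
 *     alg->finalize(state, digest);    // writes alg->hash_len bytes
 *
 * An XOF would instead call init(), absorb() one or more times, and
 * then squeeze() for as many output bytes as required.
 */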
/**
* \brief Check an authentication tag in constant time.
*
* \param plaintext Points to the plaintext data.
* \param plaintext_len Length of the plaintext in bytes.
* \param tag1 First tag to compare.
* \param tag2 Second tag to compare.
* \param tag_len Length of the tags in bytes.
*
* \return Returns -1 if the tag check failed or 0 if the check succeeded.
*
* If the tag check fails, then the \a plaintext will also be zeroed to
* prevent it from being used accidentally by the application when the
* ciphertext was invalid.
*/
int aead_check_tag
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned tag_len);
/**
* \brief Check an authentication tag in constant time with a previous check.
*
* \param plaintext Points to the plaintext data.
* \param plaintext_len Length of the plaintext in bytes.
* \param tag1 First tag to compare.
* \param tag2 Second tag to compare.
* \param tag_len Length of the tags in bytes.
* \param precheck Set to -1 if previous check succeeded or 0 if it failed.
*
* \return Returns -1 if the tag check failed or 0 if the check succeeded.
*
* If the tag check fails, then the \a plaintext will also be zeroed to
* prevent it from being used accidentally by the application when the
* ciphertext was invalid.
*
* This version can be used to incorporate other information about the
* correctness of the plaintext into the final result.
*/
int aead_check_tag_precheck
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned tag_len, int precheck);
#ifdef __cplusplus
}
#endif
#endif
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
#include "romulus.h"
int crypto_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
return romulus_n1_aead_encrypt
(c, clen, m, mlen, ad, adlen, nsec, npub, k);
}
int crypto_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
return romulus_n1_aead_decrypt
(m, mlen, nsec, c, clen, ad, adlen, npub, k);
}
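/*
 * Minimal sketch of exercising the NIST API wrappers above; the key,
 * nonce, and test values are illustrative placeholders and the helper
 * name is an assumption.
 */
#include <string.h>
static int crypto_aead_selftest(void)
{
    unsigned char key[CRYPTO_KEYBYTES] = {0};
    unsigned char nonce[CRYPTO_NPUBBYTES] = {0};
    unsigned char msg[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    unsigned char ad[4] = {9, 10, 11, 12};
    unsigned char ct[sizeof(msg) + CRYPTO_ABYTES];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;
    /* Encrypt: the output is the ciphertext followed by the 16-byte tag */
    crypto_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                        ad, sizeof(ad), 0, nonce, key);
    /* Decrypt and verify the tag; a non-zero result indicates failure */
    if (crypto_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                            ad, sizeof(ad), nonce, key) != 0)
        return 0;
    /* A successful round trip reproduces the original plaintext */
    return ptlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0;
}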
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal-skinny128.h"
#include "internal-skinnyutil.h"
#include "internal-util.h"
#include <string.h>
#if !defined(__AVR__)
STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk)
{
/* This function is used to fast-forward the TK1 tweak value
* to the value at the end of the key schedule for decryption.
*
* The tweak permutation repeats every 16 rounds, so SKINNY-128-256
* with 48 rounds does not need any fast forwarding applied.
* SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds
* are equivalent to applying the permutation 8 times:
*
* PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12]
*/
uint32_t row0 = tk[0];
uint32_t row1 = tk[1];
uint32_t row2 = tk[2];
uint32_t row3 = tk[3];
tk[0] = ((row1 >> 8) & 0x0000FFFFU) |
((row0 >> 8) & 0x00FF0000U) |
((row0 << 8) & 0xFF000000U);
tk[1] = ((row1 >> 24) & 0x000000FFU) |
((row0 << 8) & 0x00FFFF00U) |
((row1 << 24) & 0xFF000000U);
tk[2] = ((row3 >> 8) & 0x0000FFFFU) |
((row2 >> 8) & 0x00FF0000U) |
((row2 << 8) & 0xFF000000U);
tk[3] = ((row3 >> 24) & 0x000000FFU) |
((row2 << 8) & 0x00FFFF00U) |
((row3 << 24) & 0xFF000000U);
}
void skinny_128_384_init
(skinny_128_384_key_schedule_t *ks, const unsigned char key[48])
{
#if !SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t TK3[4];
uint32_t *schedule;
unsigned round;
uint8_t rc;
#endif
#if SKINNY_128_SMALL_SCHEDULE
/* Copy the input key as-is when using the small key schedule version */
memcpy(ks->TK1, key, sizeof(ks->TK1));
memcpy(ks->TK2, key + 16, sizeof(ks->TK2));
memcpy(ks->TK3, key + 32, sizeof(ks->TK3));
#else
/* Set the initial states of TK1, TK2, and TK3 */
memcpy(ks->TK1, key, 16);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
TK3[0] = le_load_word32(key + 32);
TK3[1] = le_load_word32(key + 36);
TK3[2] = le_load_word32(key + 40);
TK3[3] = le_load_word32(key + 44);
/* Set up the key schedule using TK2 and TK3. TK1 is not added
* to the key schedule because we will derive that part of the
* schedule during encryption operations */
schedule = ks->k;
rc = 0;
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) {
/* XOR the round constants with the current schedule words.
* The round constants for the 3rd and 4th rows are
* fixed and will be applied during encryption. */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F);
schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4);
/* Permute TK2 and TK3 for the next round */
skinny128_permute_tk(TK2);
skinny128_permute_tk(TK3);
/* Apply the LFSR's to TK2 and TK3 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
}
#endif
}
void skinny_128_384_encrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t TK3[4];
uint8_t rc = 0;
#else
const uint32_t *schedule = ks->k;
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
TK3[0] = le_load_word32(ks->TK3);
TK3[1] = le_load_word32(ks->TK3 + 4);
TK3[2] = le_load_word32(ks->TK3 + 8);
TK3[3] = le_load_word32(ks->TK3 + 12);
#endif
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
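        /* Step the 6-bit round constant LFSR forwards */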
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
#endif
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 for the next round */
skinny128_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_permute_tk(TK2);
skinny128_permute_tk(TK3);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
#else
schedule += 2;
#endif
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_384_decrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t TK3[4];
uint8_t rc = 0x15;
#else
const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]);
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state, TK1 */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
TK3[0] = le_load_word32(ks->TK3);
TK3[1] = le_load_word32(ks->TK3 + 4);
TK3[2] = le_load_word32(ks->TK3 + 8);
TK3[3] = le_load_word32(ks->TK3 + 12);
#endif
/* Permute TK1 to fast-forward it to the end of the key schedule */
skinny128_fast_forward_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_fast_forward_tk(TK2);
skinny128_fast_forward_tk(TK3);
for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) {
/* Also fast-forward the LFSR's on every byte of TK2 and TK3 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR2(TK2[2]);
skinny128_LFSR2(TK2[3]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
skinny128_LFSR3(TK3[2]);
skinny128_LFSR3(TK3[3]);
}
#endif
/* Perform all decryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Inverse permutation on TK1 for this round */
skinny128_inv_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_inv_permute_tk(TK2);
skinny128_inv_permute_tk(TK3);
skinny128_LFSR3(TK2[2]);
skinny128_LFSR3(TK2[3]);
skinny128_LFSR2(TK3[2]);
skinny128_LFSR2(TK3[3]);
#endif
/* Inverse mix of the columns */
temp = s3;
s3 = s0;
s0 = s1;
s1 = s2;
s3 ^= temp;
s2 = temp ^ s0;
s1 ^= s2;
/* Inverse shift of the rows */
s1 = leftRotate24(s1);
s2 = leftRotate16(s2);
s3 = leftRotate8(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
schedule -= 2;
#endif
s2 ^= 0x02;
/* Apply the inverse of the S-box to all bytes in the state */
skinny128_inv_sbox(s0);
skinny128_inv_sbox(s1);
skinny128_inv_sbox(s2);
skinny128_inv_sbox(s3);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_384_encrypt_tk2
(skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input, const unsigned char *tk2)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
uint32_t TK2[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK3[4];
uint8_t rc = 0;
#else
const uint32_t *schedule = ks->k;
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
TK2[0] = le_load_word32(tk2);
TK2[1] = le_load_word32(tk2 + 4);
TK2[2] = le_load_word32(tk2 + 8);
TK2[3] = le_load_word32(tk2 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK3[0] = le_load_word32(ks->TK3);
TK3[1] = le_load_word32(ks->TK3 + 4);
TK3[2] = le_load_word32(ks->TK3 + 8);
TK3[3] = le_load_word32(ks->TK3 + 12);
#endif
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0] ^ TK2[0];
s1 ^= schedule[1] ^ TK1[1] ^ TK2[1];
#endif
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 and TK2 for the next round */
skinny128_permute_tk(TK1);
skinny128_permute_tk(TK2);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_permute_tk(TK3);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
#else
schedule += 2;
#endif
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_384_encrypt_tk_full
(const unsigned char key[48], unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
uint32_t TK2[4];
uint32_t TK3[4];
uint32_t temp;
unsigned round;
uint8_t rc = 0;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakey */
TK1[0] = le_load_word32(key);
TK1[1] = le_load_word32(key + 4);
TK1[2] = le_load_word32(key + 8);
TK1[3] = le_load_word32(key + 12);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
TK3[0] = le_load_word32(key + 32);
TK3[1] = le_load_word32(key + 36);
TK3[2] = le_load_word32(key + 40);
TK3[3] = le_load_word32(key + 44);
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* XOR the round constant and the subkey for this round */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1, TK2, and TK3 for the next round */
skinny128_permute_tk(TK1);
skinny128_permute_tk(TK2);
skinny128_permute_tk(TK3);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_256_init
(skinny_128_256_key_schedule_t *ks, const unsigned char key[32])
{
#if !SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t *schedule;
unsigned round;
uint8_t rc;
#endif
#if SKINNY_128_SMALL_SCHEDULE
/* Copy the input key as-is when using the small key schedule version */
memcpy(ks->TK1, key, sizeof(ks->TK1));
memcpy(ks->TK2, key + 16, sizeof(ks->TK2));
#else
/* Set the initial states of TK1 and TK2 */
memcpy(ks->TK1, key, 16);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
/* Set up the key schedule using TK2. TK1 is not added
* to the key schedule because we will derive that part of the
* schedule during encryption operations */
schedule = ks->k;
rc = 0;
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) {
/* XOR the round constants with the current schedule words.
* The round constants for the 3rd and 4th rows are
* fixed and will be applied during encryption. */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
schedule[0] = TK2[0] ^ (rc & 0x0F);
schedule[1] = TK2[1] ^ (rc >> 4);
/* Permute TK2 for the next round */
skinny128_permute_tk(TK2);
/* Apply the LFSR to TK2 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
}
#endif
}
void skinny_128_256_encrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint8_t rc = 0;
#else
const uint32_t *schedule = ks->k;
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state, TK1 */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
#endif
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* XOR the round constant and the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
#endif
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 and TK2 for the next round */
skinny128_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_permute_tk(TK2);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
#else
schedule += 2;
#endif
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_256_decrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint8_t rc = 0x09;
#else
const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]);
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state, TK1.
* There is no need to fast-forward TK1 because the value at
* the end of the key schedule is the same as at the start */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) {
/* Also fast-forward the LFSR's on every byte of TK2 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR2(TK2[2]);
skinny128_LFSR2(TK2[3]);
}
#endif
/* Perform all decryption rounds */
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
/* Inverse permutation on TK1 for this round */
skinny128_inv_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_inv_permute_tk(TK2);
skinny128_LFSR3(TK2[2]);
skinny128_LFSR3(TK2[3]);
#endif
/* Inverse mix of the columns */
temp = s3;
s3 = s0;
s0 = s1;
s1 = s2;
s3 ^= temp;
s2 = temp ^ s0;
s1 ^= s2;
/* Inverse shift of the rows */
s1 = leftRotate24(s1);
s2 = leftRotate16(s2);
s3 = leftRotate8(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
schedule -= 2;
#endif
s2 ^= 0x02;
/* Apply the inverse of the S-box to all bytes in the state */
skinny128_inv_sbox(s0);
skinny128_inv_sbox(s1);
skinny128_inv_sbox(s2);
skinny128_inv_sbox(s3);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_256_encrypt_tk_full
(const unsigned char key[32], unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
uint32_t TK2[4];
uint32_t temp;
unsigned round;
uint8_t rc = 0;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakey */
TK1[0] = le_load_word32(key);
TK1[1] = le_load_word32(key + 4);
TK1[2] = le_load_word32(key + 8);
TK1[3] = le_load_word32(key + 12);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* XOR the round constant and the subkey for this round */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4);
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 and TK2 for the next round */
skinny128_permute_tk(TK1);
skinny128_permute_tk(TK2);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
#else /* __AVR__ */
void skinny_128_384_encrypt_tk2
(skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input, const unsigned char *tk2)
{
memcpy(ks->TK2, tk2, 16);
skinny_128_384_encrypt(ks, output, input);
}
#endif /* __AVR__ */
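/*
 * A minimal round-trip sketch for the SKINNY-128-384 functions above.
 * The key and plaintext bytes are arbitrary placeholders rather than
 * official test vectors, and the helper name is illustrative only.
 */
#include <string.h>
#include "internal-skinny128.h"

int skinny_128_384_roundtrip_sketch(void)
{
    skinny_128_384_key_schedule_t ks;
    unsigned char key[48];
    unsigned char pt[16], ct[16], out[16];
    unsigned i;

    /* Fill the 48-byte tweakey and the plaintext with arbitrary patterns */
    for (i = 0; i < 48; ++i)
        key[i] = (unsigned char)i;
    for (i = 0; i < 16; ++i)
        pt[i] = (unsigned char)(0xA0 + i);

    /* Expand the key schedule, encrypt one block, then decrypt it again */
    skinny_128_384_init(&ks, key);
    skinny_128_384_encrypt(&ks, ct, pt);
    skinny_128_384_decrypt(&ks, out, ct);

    /* Decryption should reproduce the original plaintext */
    return memcmp(pt, out, 16) == 0;
}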
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_SKINNY128_H
#define LW_INTERNAL_SKINNY128_H
/**
* \file internal-skinny128.h
* \brief SKINNY-128 block cipher family.
*
* References: https://eprint.iacr.org/2016/660.pdf,
* https://sites.google.com/site/skinnycipher/
*/
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* \def SKINNY_128_SMALL_SCHEDULE
* \brief Defined to 1 to use the small key schedule version of SKINNY-128.
*/
#if defined(__AVR__)
#define SKINNY_128_SMALL_SCHEDULE 1
#else
#define SKINNY_128_SMALL_SCHEDULE 0
#endif
/**
* \brief Size of a block for SKINNY-128 block ciphers.
*/
#define SKINNY_128_BLOCK_SIZE 16
/**
* \brief Number of rounds for SKINNY-128-384.
*/
#define SKINNY_128_384_ROUNDS 56
/**
* \brief Structure of the key schedule for SKINNY-128-384.
*/
typedef struct
{
/** TK1 for the tweakable part of the key schedule */
uint8_t TK1[16];
#if SKINNY_128_SMALL_SCHEDULE
/** TK2 for the small key schedule */
uint8_t TK2[16];
/** TK3 for the small key schedule */
uint8_t TK3[16];
#else
/** Words of the full key schedule */
uint32_t k[SKINNY_128_384_ROUNDS * 2];
#endif
} skinny_128_384_key_schedule_t;
/**
* \brief Initializes the key schedule for SKINNY-128-384.
*
* \param ks Points to the key schedule to initialize.
* \param key Points to the key data.
*/
void skinny_128_384_init
(skinny_128_384_key_schedule_t *ks, const unsigned char key[48]);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-384.
*
* \param ks Points to the SKINNY-128-384 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*/
void skinny_128_384_encrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Decrypts a 128-bit block with SKINNY-128-384.
*
* \param ks Points to the SKINNY-128-384 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
 * in-place decryption.
*/
void skinny_128_384_decrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly
* provided TK2 value.
*
* \param ks Points to the SKINNY-128-384 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
* \param tk2 TK2 value that should be updated on the fly.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* This version is useful when both TK1 and TK2 change from block to block.
* When the key is initialized with skinny_128_384_init(), the TK2 part of
* the key value should be set to zero.
*
* \note Some versions of this function may modify the key schedule to
* copy tk2 into place.
*/
void skinny_128_384_encrypt_tk2
(skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input, const unsigned char *tk2);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-384 and a
* fully specified tweakey value.
*
* \param key Points to the 384-bit tweakey value.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* This version is useful when the entire tweakey changes from block to
* block. It is slower than the other versions of SKINNY-128-384 but
* more memory-efficient.
*/
void skinny_128_384_encrypt_tk_full
(const unsigned char key[48], unsigned char *output,
const unsigned char *input);
/**
* \brief Number of rounds for SKINNY-128-256.
*/
#define SKINNY_128_256_ROUNDS 48
/**
* \brief Structure of the key schedule for SKINNY-128-256.
*/
typedef struct
{
/** TK1 for the tweakable part of the key schedule */
uint8_t TK1[16];
#if SKINNY_128_SMALL_SCHEDULE
/** TK2 for the small key schedule */
uint8_t TK2[16];
#else
/** Words of the full key schedule */
uint32_t k[SKINNY_128_256_ROUNDS * 2];
#endif
} skinny_128_256_key_schedule_t;
/**
* \brief Initializes the key schedule for SKINNY-128-256.
*
* \param ks Points to the key schedule to initialize.
* \param key Points to the key data.
*/
void skinny_128_256_init
(skinny_128_256_key_schedule_t *ks, const unsigned char key[32]);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-256.
*
* \param ks Points to the SKINNY-128-256 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*/
void skinny_128_256_encrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Decrypts a 128-bit block with SKINNY-128-256.
*
* \param ks Points to the SKINNY-128-256 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
 * in-place decryption.
*/
void skinny_128_256_decrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-256 and a
* fully specified tweakey value.
*
* \param key Points to the 256-bit tweakey value.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* This version is useful when the entire tweakey changes from block to
* block. It is slower than the other versions of SKINNY-128-256 but
* more memory-efficient.
*/
void skinny_128_256_encrypt_tk_full
(const unsigned char key[32], unsigned char *output,
const unsigned char *input);
#ifdef __cplusplus
}
#endif
#endif
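/*
 * Sketch of driving skinny_128_384_encrypt_tk2() when TK2 changes from
 * block to block, following the \note in the header above: the TK2 bytes
 * of the 48-byte init key are zeroed and the real TK2 is supplied on each
 * call.  The TK1 || TK2 || TK3 layout of the key is assumed from
 * skinny_128_384_encrypt_tk_full(); the function and buffer names here
 * are illustrative placeholders.
 */
#include <string.h>
#include "internal-skinny128.h"

void skinny_128_384_tk2_stream_sketch
    (unsigned char *out, const unsigned char *in, const unsigned char *tk2s,
     const unsigned char tk1[16], const unsigned char tk3[16], unsigned blocks)
{
    skinny_128_384_key_schedule_t ks;
    unsigned char key[48];
    unsigned i;

    /* Initialize with the TK2 portion of the key set to zero */
    memcpy(key, tk1, 16);
    memset(key + 16, 0, 16);
    memcpy(key + 32, tk3, 16);
    skinny_128_384_init(&ks, key);

    /* Supply a fresh 16-byte TK2 value for every block */
    for (i = 0; i < blocks; ++i)
        skinny_128_384_encrypt_tk2(&ks, out + i * 16, in + i * 16, tk2s + i * 16);
}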
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_SKINNYUTIL_H
#define LW_INTERNAL_SKINNYUTIL_H
/**
* \file internal-skinnyutil.h
* \brief Utilities to help implement SKINNY and its variants.
*/
#include "internal-util.h"
#ifdef __cplusplus
extern "C" {
#endif
/** @cond skinnyutil */
/* Utilities for implementing SKINNY-128 */
#define skinny128_LFSR2(x) \
do { \
uint32_t _x = (x); \
(x) = ((_x << 1) & 0xFEFEFEFEU) ^ \
(((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \
} while (0)
#define skinny128_LFSR3(x) \
do { \
uint32_t _x = (x); \
(x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \
(((_x << 7) ^ (_x << 1)) & 0x80808080U); \
} while (0)
/* LFSR2 and LFSR3 are inverses of each other */
#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x)
#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x)
#define skinny128_permute_tk(tk) \
do { \
/* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \
uint32_t row2 = tk[2]; \
uint32_t row3 = tk[3]; \
tk[2] = tk[0]; \
tk[3] = tk[1]; \
row3 = (row3 << 16) | (row3 >> 16); \
tk[0] = ((row2 >> 8) & 0x000000FFU) | \
((row2 << 16) & 0x00FF0000U) | \
( row3 & 0xFF00FF00U); \
tk[1] = ((row2 >> 16) & 0x000000FFU) | \
(row2 & 0xFF000000U) | \
((row3 << 8) & 0x0000FF00U) | \
( row3 & 0x00FF0000U); \
} while (0)
#define skinny128_inv_permute_tk(tk) \
do { \
/* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \
uint32_t row0 = tk[0]; \
uint32_t row1 = tk[1]; \
tk[0] = tk[2]; \
tk[1] = tk[3]; \
tk[2] = ((row0 >> 16) & 0x000000FFU) | \
((row0 << 8) & 0x0000FF00U) | \
((row1 << 16) & 0x00FF0000U) | \
( row1 & 0xFF000000U); \
tk[3] = ((row0 >> 16) & 0x0000FF00U) | \
((row0 << 16) & 0xFF000000U) | \
((row1 >> 16) & 0x000000FFU) | \
((row1 << 8) & 0x00FF0000U); \
} while (0)
/*
* Apply the SKINNY sbox. The original version from the specification is
* equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
* #define SBOX_SWAP(x)
* (((x) & 0xF9F9F9F9U) |
* (((x) >> 1) & 0x02020202U) |
* (((x) << 1) & 0x04040404U))
* #define SBOX_PERMUTE(x)
* ((((x) & 0x01010101U) << 2) |
* (((x) & 0x06060606U) << 5) |
* (((x) & 0x20202020U) >> 5) |
* (((x) & 0xC8C8C8C8U) >> 2) |
* (((x) & 0x10101010U) >> 1))
*
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE(x);
* x = SBOX_MIX(x);
* return SBOX_SWAP(x);
*
* However, we can mix the bits in their original positions and then
* delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one
 * final permutation. This reduces the number of shift operations.
*/
#define skinny128_sbox(x) \
do { \
uint32_t y; \
\
/* Mix the bits */ \
x = ~x; \
x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \
y = (((x << 5) & (x << 1)) & 0x20202020U); \
x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \
y = (((x << 2) & (x << 1)) & 0x80808080U); \
x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \
y = (((x >> 5) & (x << 1)) & 0x04040404U); \
x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \
x = ~x; \
\
/* Permutation generated by http://programming.sirrida.de/calcperm.php */ \
/* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \
x = ((x & 0x08080808U) << 1) | \
((x & 0x32323232U) << 2) | \
((x & 0x01010101U) << 5) | \
((x & 0x80808080U) >> 6) | \
((x & 0x40404040U) >> 4) | \
((x & 0x04040404U) >> 2); \
} while (0)
/*
* Apply the inverse of the SKINNY sbox. The original version from the
* specification is equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
* #define SBOX_SWAP(x)
* (((x) & 0xF9F9F9F9U) |
* (((x) >> 1) & 0x02020202U) |
* (((x) << 1) & 0x04040404U))
* #define SBOX_PERMUTE_INV(x)
* ((((x) & 0x08080808U) << 1) |
* (((x) & 0x32323232U) << 2) |
* (((x) & 0x01010101U) << 5) |
* (((x) & 0xC0C0C0C0U) >> 5) |
* (((x) & 0x04040404U) >> 2))
*
* x = SBOX_SWAP(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE_INV(x);
* return SBOX_MIX(x);
*
* However, we can mix the bits in their original positions and then
* delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one
 * final permutation. This reduces the number of shift operations.
*/
#define skinny128_inv_sbox(x) \
do { \
uint32_t y; \
\
/* Mix the bits */ \
x = ~x; \
y = (((x >> 1) & (x >> 3)) & 0x01010101U); \
x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \
y = (((x >> 6) & (x >> 1)) & 0x02020202U); \
x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \
y = (((x << 2) & (x << 1)) & 0x80808080U); \
x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \
y = (((x << 5) & (x << 1)) & 0x20202020U); \
x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \
x = ~x; \
\
/* Permutation generated by http://programming.sirrida.de/calcperm.php */ \
/* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \
x = ((x & 0x01010101U) << 2) | \
((x & 0x04040404U) << 4) | \
((x & 0x02020202U) << 6) | \
((x & 0x20202020U) >> 5) | \
((x & 0xC8C8C8C8U) >> 2) | \
((x & 0x10101010U) >> 1); \
} while (0)
/* Utilities for implementing SKINNY-64 */
#define skinny64_LFSR2(x) \
do { \
uint16_t _x = (x); \
(x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \
} while (0)
#define skinny64_LFSR3(x) \
do { \
uint16_t _x = (x); \
(x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \
} while (0)
/* LFSR2 and LFSR3 are inverses of each other */
#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x)
#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x)
#define skinny64_permute_tk(tk) \
do { \
/* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \
uint16_t row2 = tk[2]; \
uint16_t row3 = tk[3]; \
tk[2] = tk[0]; \
tk[3] = tk[1]; \
row3 = (row3 << 8) | (row3 >> 8); \
tk[0] = ((row2 << 4) & 0xF000U) | \
((row2 >> 8) & 0x00F0U) | \
( row3 & 0x0F0FU); \
tk[1] = ((row2 << 8) & 0xF000U) | \
((row3 >> 4) & 0x0F00U) | \
( row3 & 0x00F0U) | \
( row2 & 0x000FU); \
} while (0)
#define skinny64_inv_permute_tk(tk) \
do { \
/* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \
uint16_t row0 = tk[0]; \
uint16_t row1 = tk[1]; \
tk[0] = tk[2]; \
tk[1] = tk[3]; \
tk[2] = ((row0 << 8) & 0xF000U) | \
((row0 >> 4) & 0x0F00U) | \
((row1 >> 8) & 0x00F0U) | \
( row1 & 0x000FU); \
tk[3] = ((row1 << 8) & 0xF000U) | \
((row0 << 8) & 0x0F00U) | \
((row1 >> 4) & 0x00F0U) | \
((row0 >> 8) & 0x000FU); \
} while (0)
/*
* Apply the SKINNY-64 sbox. The original version from the
* specification is equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x))
* #define SBOX_SHIFT(x)
* ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U))
*
* x = SBOX_MIX(x);
* x = SBOX_SHIFT(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT(x);
* return SBOX_MIX(x);
*
* However, we can mix the bits in their original positions and then
* delay the SBOX_SHIFT steps to be performed with one final rotation.
* This reduces the number of required shift operations from 14 to 10.
*
* We can further reduce the number of NOT operations from 4 to 2
* using the technique from https://github.com/kste/skinny_avx to
* convert NOR-XOR operations into AND-XOR operations by converting
* the S-box into its NOT-inverse.
*/
#define skinny64_sbox(x) \
do { \
x = ~x; \
x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \
x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \
x = ~x; \
x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \
} while (0)
/*
* Apply the inverse of the SKINNY-64 sbox. The original version
* from the specification is equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x))
* #define SBOX_SHIFT_INV(x)
* ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U))
*
* x = SBOX_MIX(x);
* x = SBOX_SHIFT_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT_INV(x);
* return SBOX_MIX(x);
*/
#define skinny64_inv_sbox(x) \
do { \
x = ~x; \
x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \
x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \
x = ~x; \
x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \
} while (0)
/** @endcond */
#ifdef __cplusplus
}
#endif
#endif
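/*
 * A small self-check sketch for the claim above that skinny128_sbox() is
 * equivalent to the MIX/PERMUTE/SWAP composition from the specification.
 * The SBOX_* macros are copied from the comment before skinny128_sbox();
 * the function names are illustrative placeholders.  Because the S-box acts
 * independently on each byte of the word, replicating every byte value
 * across all four lanes covers every case.
 */
#include <stdint.h>
#include "internal-skinnyutil.h"

#define SBOX_MIX(x) \
    (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
#define SBOX_SWAP(x) \
    (((x) & 0xF9F9F9F9U) | \
     (((x) >> 1) & 0x02020202U) | \
     (((x) << 1) & 0x04040404U))
#define SBOX_PERMUTE(x) \
    ((((x) & 0x01010101U) << 2) | \
     (((x) & 0x06060606U) << 5) | \
     (((x) & 0x20202020U) >> 5) | \
     (((x) & 0xC8C8C8C8U) >> 2) | \
     (((x) & 0x10101010U) >> 1))

/* Specification-style S-box: mix/permute three times, mix, then swap */
static uint32_t skinny128_sbox_spec(uint32_t x)
{
    x = SBOX_MIX(x);
    x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    return SBOX_SWAP(x);
}

int skinny128_sbox_self_check(void)
{
    unsigned b;
    for (b = 0; b < 256; ++b) {
        uint32_t x = (uint32_t)b * 0x01010101U;
        uint32_t y = x;
        skinny128_sbox(y);              /* optimized version from the header */
        if (y != skinny128_sbox_spec(x))
            return 0;                   /* mismatch found */
    }
    return 1;                           /* all 256 byte values agree */
}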
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_UTIL_H
#define LW_INTERNAL_UTIL_H
#include <stdint.h>
/* Figure out how to inline functions using this C compiler */
#if defined(__STDC__) && __STDC_VERSION__ >= 199901L
#define STATIC_INLINE static inline
#elif defined(__GNUC__) || defined(__clang__)
#define STATIC_INLINE static __inline__
#else
#define STATIC_INLINE static
#endif
/* Try to figure out whether the CPU is little-endian or big-endian.
* May need to modify this to include new compiler-specific defines.
* Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your
* compiler flags when you compile this library */
#if defined(__x86_64) || defined(__x86_64__) || \
defined(__i386) || defined(__i386__) || \
defined(__AVR__) || defined(__arm) || defined(__arm__) || \
defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \
defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \
(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \
defined(__LITTLE_ENDIAN__)
#define LW_UTIL_LITTLE_ENDIAN 1
#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \
defined(__BIG_ENDIAN__)
/* Big endian */
#else
#error "Cannot determine the endianess of this platform"
#endif
/* Helper macros to load and store values while converting endian-ness */
/* Load a big-endian 32-bit word from a byte buffer */
#define be_load_word32(ptr) \
((((uint32_t)((ptr)[0])) << 24) | \
(((uint32_t)((ptr)[1])) << 16) | \
(((uint32_t)((ptr)[2])) << 8) | \
((uint32_t)((ptr)[3])))
/* Store a big-endian 32-bit word into a byte buffer */
#define be_store_word32(ptr, x) \
do { \
uint32_t _x = (x); \
(ptr)[0] = (uint8_t)(_x >> 24); \
(ptr)[1] = (uint8_t)(_x >> 16); \
(ptr)[2] = (uint8_t)(_x >> 8); \
(ptr)[3] = (uint8_t)_x; \
} while (0)
/* Load a little-endian 32-bit word from a byte buffer */
#define le_load_word32(ptr) \
((((uint32_t)((ptr)[3])) << 24) | \
(((uint32_t)((ptr)[2])) << 16) | \
(((uint32_t)((ptr)[1])) << 8) | \
((uint32_t)((ptr)[0])))
/* Store a little-endian 32-bit word into a byte buffer */
#define le_store_word32(ptr, x) \
do { \
uint32_t _x = (x); \
(ptr)[0] = (uint8_t)_x; \
(ptr)[1] = (uint8_t)(_x >> 8); \
(ptr)[2] = (uint8_t)(_x >> 16); \
(ptr)[3] = (uint8_t)(_x >> 24); \
} while (0)
/* Load a big-endian 64-bit word from a byte buffer */
#define be_load_word64(ptr) \
((((uint64_t)((ptr)[0])) << 56) | \
(((uint64_t)((ptr)[1])) << 48) | \
(((uint64_t)((ptr)[2])) << 40) | \
(((uint64_t)((ptr)[3])) << 32) | \
(((uint64_t)((ptr)[4])) << 24) | \
(((uint64_t)((ptr)[5])) << 16) | \
(((uint64_t)((ptr)[6])) << 8) | \
((uint64_t)((ptr)[7])))
/* Store a big-endian 64-bit word into a byte buffer */
#define be_store_word64(ptr, x) \
do { \
uint64_t _x = (x); \
(ptr)[0] = (uint8_t)(_x >> 56); \
(ptr)[1] = (uint8_t)(_x >> 48); \
(ptr)[2] = (uint8_t)(_x >> 40); \
(ptr)[3] = (uint8_t)(_x >> 32); \
(ptr)[4] = (uint8_t)(_x >> 24); \
(ptr)[5] = (uint8_t)(_x >> 16); \
(ptr)[6] = (uint8_t)(_x >> 8); \
(ptr)[7] = (uint8_t)_x; \
} while (0)
/* Load a little-endian 64-bit word from a byte buffer */
#define le_load_word64(ptr) \
((((uint64_t)((ptr)[7])) << 56) | \
(((uint64_t)((ptr)[6])) << 48) | \
(((uint64_t)((ptr)[5])) << 40) | \
(((uint64_t)((ptr)[4])) << 32) | \
(((uint64_t)((ptr)[3])) << 24) | \
(((uint64_t)((ptr)[2])) << 16) | \
(((uint64_t)((ptr)[1])) << 8) | \
((uint64_t)((ptr)[0])))
/* Store a little-endian 64-bit word into a byte buffer */
#define le_store_word64(ptr, x) \
do { \
uint64_t _x = (x); \
(ptr)[0] = (uint8_t)_x; \
(ptr)[1] = (uint8_t)(_x >> 8); \
(ptr)[2] = (uint8_t)(_x >> 16); \
(ptr)[3] = (uint8_t)(_x >> 24); \
(ptr)[4] = (uint8_t)(_x >> 32); \
(ptr)[5] = (uint8_t)(_x >> 40); \
(ptr)[6] = (uint8_t)(_x >> 48); \
(ptr)[7] = (uint8_t)(_x >> 56); \
} while (0)
/* Load a big-endian 16-bit word from a byte buffer */
#define be_load_word16(ptr) \
((((uint16_t)((ptr)[0])) << 8) | \
((uint16_t)((ptr)[1])))
/* Store a big-endian 16-bit word into a byte buffer */
#define be_store_word16(ptr, x) \
do { \
uint16_t _x = (x); \
(ptr)[0] = (uint8_t)(_x >> 8); \
(ptr)[1] = (uint8_t)_x; \
} while (0)
/* Load a little-endian 16-bit word from a byte buffer */
#define le_load_word16(ptr) \
((((uint16_t)((ptr)[1])) << 8) | \
((uint16_t)((ptr)[0])))
/* Store a little-endian 16-bit word into a byte buffer */
#define le_store_word16(ptr, x) \
do { \
uint16_t _x = (x); \
(ptr)[0] = (uint8_t)_x; \
(ptr)[1] = (uint8_t)(_x >> 8); \
} while (0)
/* XOR a source byte buffer against a destination */
#define lw_xor_block(dest, src, len) \
do { \
unsigned char *_dest = (dest); \
const unsigned char *_src = (src); \
unsigned _len = (len); \
while (_len > 0) { \
*_dest++ ^= *_src++; \
--_len; \
} \
} while (0)
/* XOR two source byte buffers and put the result in a destination buffer */
#define lw_xor_block_2_src(dest, src1, src2, len) \
do { \
unsigned char *_dest = (dest); \
const unsigned char *_src1 = (src1); \
const unsigned char *_src2 = (src2); \
unsigned _len = (len); \
while (_len > 0) { \
*_dest++ = *_src1++ ^ *_src2++; \
--_len; \
} \
} while (0)
/* XOR a source byte buffer against a destination and write to another
* destination at the same time */
#define lw_xor_block_2_dest(dest2, dest, src, len) \
do { \
unsigned char *_dest2 = (dest2); \
unsigned char *_dest = (dest); \
const unsigned char *_src = (src); \
unsigned _len = (len); \
while (_len > 0) { \
*_dest2++ = (*_dest++ ^= *_src++); \
--_len; \
} \
} while (0)
/* XOR two source byte buffers and write the result to a destination
 * buffer while at the same time copying the contents of src2 to dest2 */
#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \
do { \
unsigned char *_dest2 = (dest2); \
unsigned char *_dest = (dest); \
const unsigned char *_src1 = (src1); \
const unsigned char *_src2 = (src2); \
unsigned _len = (len); \
while (_len > 0) { \
unsigned char _temp = *_src2++; \
*_dest2++ = _temp; \
*_dest++ = *_src1++ ^ _temp; \
--_len; \
} \
} while (0)
/* XOR a source byte buffer against a destination and write to another
* destination at the same time. This version swaps the source value
* into the "dest" buffer */
#define lw_xor_block_swap(dest2, dest, src, len) \
do { \
unsigned char *_dest2 = (dest2); \
unsigned char *_dest = (dest); \
const unsigned char *_src = (src); \
unsigned _len = (len); \
while (_len > 0) { \
unsigned char _temp = *_src++; \
*_dest2++ = *_dest ^ _temp; \
*_dest++ = _temp; \
--_len; \
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
#define leftRotate(a, bits) \
(__extension__ ({ \
uint32_t _temp = (a); \
(_temp << (bits)) | (_temp >> (32 - (bits))); \
}))
/* Generic right rotate */
#define rightRotate(a, bits) \
(__extension__ ({ \
uint32_t _temp = (a); \
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
#define leftRotate2(a) (leftRotate((a), 2))
#define leftRotate3(a) (leftRotate((a), 3))
#define leftRotate4(a) (leftRotate((a), 4))
#define leftRotate5(a) (leftRotate((a), 5))
#define leftRotate6(a) (leftRotate((a), 6))
#define leftRotate7(a) (leftRotate((a), 7))
#define leftRotate8(a) (leftRotate((a), 8))
#define leftRotate9(a) (leftRotate((a), 9))
#define leftRotate10(a) (leftRotate((a), 10))
#define leftRotate11(a) (leftRotate((a), 11))
#define leftRotate12(a) (leftRotate((a), 12))
#define leftRotate13(a) (leftRotate((a), 13))
#define leftRotate14(a) (leftRotate((a), 14))
#define leftRotate15(a) (leftRotate((a), 15))
#define leftRotate16(a) (leftRotate((a), 16))
#define leftRotate17(a) (leftRotate((a), 17))
#define leftRotate18(a) (leftRotate((a), 18))
#define leftRotate19(a) (leftRotate((a), 19))
#define leftRotate20(a) (leftRotate((a), 20))
#define leftRotate21(a) (leftRotate((a), 21))
#define leftRotate22(a) (leftRotate((a), 22))
#define leftRotate23(a) (leftRotate((a), 23))
#define leftRotate24(a) (leftRotate((a), 24))
#define leftRotate25(a) (leftRotate((a), 25))
#define leftRotate26(a) (leftRotate((a), 26))
#define leftRotate27(a) (leftRotate((a), 27))
#define leftRotate28(a) (leftRotate((a), 28))
#define leftRotate29(a) (leftRotate((a), 29))
#define leftRotate30(a) (leftRotate((a), 30))
#define leftRotate31(a) (leftRotate((a), 31))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1(a) (rightRotate((a), 1))
#define rightRotate2(a) (rightRotate((a), 2))
#define rightRotate3(a) (rightRotate((a), 3))
#define rightRotate4(a) (rightRotate((a), 4))
#define rightRotate5(a) (rightRotate((a), 5))
#define rightRotate6(a) (rightRotate((a), 6))
#define rightRotate7(a) (rightRotate((a), 7))
#define rightRotate8(a) (rightRotate((a), 8))
#define rightRotate9(a) (rightRotate((a), 9))
#define rightRotate10(a) (rightRotate((a), 10))
#define rightRotate11(a) (rightRotate((a), 11))
#define rightRotate12(a) (rightRotate((a), 12))
#define rightRotate13(a) (rightRotate((a), 13))
#define rightRotate14(a) (rightRotate((a), 14))
#define rightRotate15(a) (rightRotate((a), 15))
#define rightRotate16(a) (rightRotate((a), 16))
#define rightRotate17(a) (rightRotate((a), 17))
#define rightRotate18(a) (rightRotate((a), 18))
#define rightRotate19(a) (rightRotate((a), 19))
#define rightRotate20(a) (rightRotate((a), 20))
#define rightRotate21(a) (rightRotate((a), 21))
#define rightRotate22(a) (rightRotate((a), 22))
#define rightRotate23(a) (rightRotate((a), 23))
#define rightRotate24(a) (rightRotate((a), 24))
#define rightRotate25(a) (rightRotate((a), 25))
#define rightRotate26(a) (rightRotate((a), 26))
#define rightRotate27(a) (rightRotate((a), 27))
#define rightRotate28(a) (rightRotate((a), 28))
#define rightRotate29(a) (rightRotate((a), 29))
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
#define leftRotate_64(a, bits) \
(__extension__ ({ \
uint64_t _temp = (a); \
(_temp << (bits)) | (_temp >> (64 - (bits))); \
}))
/* Generic right rotate */
#define rightRotate_64(a, bits) \
(__extension__ ({ \
uint64_t _temp = (a); \
(_temp >> (bits)) | (_temp << (64 - (bits))); \
}))
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1_64(a) (leftRotate_64((a), 1))
#define leftRotate2_64(a) (leftRotate_64((a), 2))
#define leftRotate3_64(a) (leftRotate_64((a), 3))
#define leftRotate4_64(a) (leftRotate_64((a), 4))
#define leftRotate5_64(a) (leftRotate_64((a), 5))
#define leftRotate6_64(a) (leftRotate_64((a), 6))
#define leftRotate7_64(a) (leftRotate_64((a), 7))
#define leftRotate8_64(a) (leftRotate_64((a), 8))
#define leftRotate9_64(a) (leftRotate_64((a), 9))
#define leftRotate10_64(a) (leftRotate_64((a), 10))
#define leftRotate11_64(a) (leftRotate_64((a), 11))
#define leftRotate12_64(a) (leftRotate_64((a), 12))
#define leftRotate13_64(a) (leftRotate_64((a), 13))
#define leftRotate14_64(a) (leftRotate_64((a), 14))
#define leftRotate15_64(a) (leftRotate_64((a), 15))
#define leftRotate16_64(a) (leftRotate_64((a), 16))
#define leftRotate17_64(a) (leftRotate_64((a), 17))
#define leftRotate18_64(a) (leftRotate_64((a), 18))
#define leftRotate19_64(a) (leftRotate_64((a), 19))
#define leftRotate20_64(a) (leftRotate_64((a), 20))
#define leftRotate21_64(a) (leftRotate_64((a), 21))
#define leftRotate22_64(a) (leftRotate_64((a), 22))
#define leftRotate23_64(a) (leftRotate_64((a), 23))
#define leftRotate24_64(a) (leftRotate_64((a), 24))
#define leftRotate25_64(a) (leftRotate_64((a), 25))
#define leftRotate26_64(a) (leftRotate_64((a), 26))
#define leftRotate27_64(a) (leftRotate_64((a), 27))
#define leftRotate28_64(a) (leftRotate_64((a), 28))
#define leftRotate29_64(a) (leftRotate_64((a), 29))
#define leftRotate30_64(a) (leftRotate_64((a), 30))
#define leftRotate31_64(a) (leftRotate_64((a), 31))
#define leftRotate32_64(a) (leftRotate_64((a), 32))
#define leftRotate33_64(a) (leftRotate_64((a), 33))
#define leftRotate34_64(a) (leftRotate_64((a), 34))
#define leftRotate35_64(a) (leftRotate_64((a), 35))
#define leftRotate36_64(a) (leftRotate_64((a), 36))
#define leftRotate37_64(a) (leftRotate_64((a), 37))
#define leftRotate38_64(a) (leftRotate_64((a), 38))
#define leftRotate39_64(a) (leftRotate_64((a), 39))
#define leftRotate40_64(a) (leftRotate_64((a), 40))
#define leftRotate41_64(a) (leftRotate_64((a), 41))
#define leftRotate42_64(a) (leftRotate_64((a), 42))
#define leftRotate43_64(a) (leftRotate_64((a), 43))
#define leftRotate44_64(a) (leftRotate_64((a), 44))
#define leftRotate45_64(a) (leftRotate_64((a), 45))
#define leftRotate46_64(a) (leftRotate_64((a), 46))
#define leftRotate47_64(a) (leftRotate_64((a), 47))
#define leftRotate48_64(a) (leftRotate_64((a), 48))
#define leftRotate49_64(a) (leftRotate_64((a), 49))
#define leftRotate50_64(a) (leftRotate_64((a), 50))
#define leftRotate51_64(a) (leftRotate_64((a), 51))
#define leftRotate52_64(a) (leftRotate_64((a), 52))
#define leftRotate53_64(a) (leftRotate_64((a), 53))
#define leftRotate54_64(a) (leftRotate_64((a), 54))
#define leftRotate55_64(a) (leftRotate_64((a), 55))
#define leftRotate56_64(a) (leftRotate_64((a), 56))
#define leftRotate57_64(a) (leftRotate_64((a), 57))
#define leftRotate58_64(a) (leftRotate_64((a), 58))
#define leftRotate59_64(a) (leftRotate_64((a), 59))
#define leftRotate60_64(a) (leftRotate_64((a), 60))
#define leftRotate61_64(a) (leftRotate_64((a), 61))
#define leftRotate62_64(a) (leftRotate_64((a), 62))
#define leftRotate63_64(a) (leftRotate_64((a), 63))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1_64(a) (rightRotate_64((a), 1))
#define rightRotate2_64(a) (rightRotate_64((a), 2))
#define rightRotate3_64(a) (rightRotate_64((a), 3))
#define rightRotate4_64(a) (rightRotate_64((a), 4))
#define rightRotate5_64(a) (rightRotate_64((a), 5))
#define rightRotate6_64(a) (rightRotate_64((a), 6))
#define rightRotate7_64(a) (rightRotate_64((a), 7))
#define rightRotate8_64(a) (rightRotate_64((a), 8))
#define rightRotate9_64(a) (rightRotate_64((a), 9))
#define rightRotate10_64(a) (rightRotate_64((a), 10))
#define rightRotate11_64(a) (rightRotate_64((a), 11))
#define rightRotate12_64(a) (rightRotate_64((a), 12))
#define rightRotate13_64(a) (rightRotate_64((a), 13))
#define rightRotate14_64(a) (rightRotate_64((a), 14))
#define rightRotate15_64(a) (rightRotate_64((a), 15))
#define rightRotate16_64(a) (rightRotate_64((a), 16))
#define rightRotate17_64(a) (rightRotate_64((a), 17))
#define rightRotate18_64(a) (rightRotate_64((a), 18))
#define rightRotate19_64(a) (rightRotate_64((a), 19))
#define rightRotate20_64(a) (rightRotate_64((a), 20))
#define rightRotate21_64(a) (rightRotate_64((a), 21))
#define rightRotate22_64(a) (rightRotate_64((a), 22))
#define rightRotate23_64(a) (rightRotate_64((a), 23))
#define rightRotate24_64(a) (rightRotate_64((a), 24))
#define rightRotate25_64(a) (rightRotate_64((a), 25))
#define rightRotate26_64(a) (rightRotate_64((a), 26))
#define rightRotate27_64(a) (rightRotate_64((a), 27))
#define rightRotate28_64(a) (rightRotate_64((a), 28))
#define rightRotate29_64(a) (rightRotate_64((a), 29))
#define rightRotate30_64(a) (rightRotate_64((a), 30))
#define rightRotate31_64(a) (rightRotate_64((a), 31))
#define rightRotate32_64(a) (rightRotate_64((a), 32))
#define rightRotate33_64(a) (rightRotate_64((a), 33))
#define rightRotate34_64(a) (rightRotate_64((a), 34))
#define rightRotate35_64(a) (rightRotate_64((a), 35))
#define rightRotate36_64(a) (rightRotate_64((a), 36))
#define rightRotate37_64(a) (rightRotate_64((a), 37))
#define rightRotate38_64(a) (rightRotate_64((a), 38))
#define rightRotate39_64(a) (rightRotate_64((a), 39))
#define rightRotate40_64(a) (rightRotate_64((a), 40))
#define rightRotate41_64(a) (rightRotate_64((a), 41))
#define rightRotate42_64(a) (rightRotate_64((a), 42))
#define rightRotate43_64(a) (rightRotate_64((a), 43))
#define rightRotate44_64(a) (rightRotate_64((a), 44))
#define rightRotate45_64(a) (rightRotate_64((a), 45))
#define rightRotate46_64(a) (rightRotate_64((a), 46))
#define rightRotate47_64(a) (rightRotate_64((a), 47))
#define rightRotate48_64(a) (rightRotate_64((a), 48))
#define rightRotate49_64(a) (rightRotate_64((a), 49))
#define rightRotate50_64(a) (rightRotate_64((a), 50))
#define rightRotate51_64(a) (rightRotate_64((a), 51))
#define rightRotate52_64(a) (rightRotate_64((a), 52))
#define rightRotate53_64(a) (rightRotate_64((a), 53))
#define rightRotate54_64(a) (rightRotate_64((a), 54))
#define rightRotate55_64(a) (rightRotate_64((a), 55))
#define rightRotate56_64(a) (rightRotate_64((a), 56))
#define rightRotate57_64(a) (rightRotate_64((a), 57))
#define rightRotate58_64(a) (rightRotate_64((a), 58))
#define rightRotate59_64(a) (rightRotate_64((a), 59))
#define rightRotate60_64(a) (rightRotate_64((a), 60))
#define rightRotate61_64(a) (rightRotate_64((a), 61))
#define rightRotate62_64(a) (rightRotate_64((a), 62))
#define rightRotate63_64(a) (rightRotate_64((a), 63))
/* Rotate a 16-bit value left by a number of bits */
#define leftRotate_16(a, bits) \
(__extension__ ({ \
uint16_t _temp = (a); \
(_temp << (bits)) | (_temp >> (16 - (bits))); \
}))
/* Rotate a 16-bit value right by a number of bits */
#define rightRotate_16(a, bits) \
(__extension__ ({ \
uint16_t _temp = (a); \
(_temp >> (bits)) | (_temp << (16 - (bits))); \
}))
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1_16(a) (leftRotate_16((a), 1))
#define leftRotate2_16(a) (leftRotate_16((a), 2))
#define leftRotate3_16(a) (leftRotate_16((a), 3))
#define leftRotate4_16(a) (leftRotate_16((a), 4))
#define leftRotate5_16(a) (leftRotate_16((a), 5))
#define leftRotate6_16(a) (leftRotate_16((a), 6))
#define leftRotate7_16(a) (leftRotate_16((a), 7))
#define leftRotate8_16(a) (leftRotate_16((a), 8))
#define leftRotate9_16(a) (leftRotate_16((a), 9))
#define leftRotate10_16(a) (leftRotate_16((a), 10))
#define leftRotate11_16(a) (leftRotate_16((a), 11))
#define leftRotate12_16(a) (leftRotate_16((a), 12))
#define leftRotate13_16(a) (leftRotate_16((a), 13))
#define leftRotate14_16(a) (leftRotate_16((a), 14))
#define leftRotate15_16(a) (leftRotate_16((a), 15))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1_16(a) (rightRotate_16((a), 1))
#define rightRotate2_16(a) (rightRotate_16((a), 2))
#define rightRotate3_16(a) (rightRotate_16((a), 3))
#define rightRotate4_16(a) (rightRotate_16((a), 4))
#define rightRotate5_16(a) (rightRotate_16((a), 5))
#define rightRotate6_16(a) (rightRotate_16((a), 6))
#define rightRotate7_16(a) (rightRotate_16((a), 7))
#define rightRotate8_16(a) (rightRotate_16((a), 8))
#define rightRotate9_16(a) (rightRotate_16((a), 9))
#define rightRotate10_16(a) (rightRotate_16((a), 10))
#define rightRotate11_16(a) (rightRotate_16((a), 11))
#define rightRotate12_16(a) (rightRotate_16((a), 12))
#define rightRotate13_16(a) (rightRotate_16((a), 13))
#define rightRotate14_16(a) (rightRotate_16((a), 14))
#define rightRotate15_16(a) (rightRotate_16((a), 15))
/* Rotate an 8-bit value left by a number of bits */
#define leftRotate_8(a, bits) \
(__extension__ ({ \
uint8_t _temp = (a); \
(_temp << (bits)) | (_temp >> (8 - (bits))); \
}))
/* Rotate an 8-bit value right by a number of bits */
#define rightRotate_8(a, bits) \
(__extension__ ({ \
uint8_t _temp = (a); \
(_temp >> (bits)) | (_temp << (8 - (bits))); \
}))
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1_8(a) (leftRotate_8((a), 1))
#define leftRotate2_8(a) (leftRotate_8((a), 2))
#define leftRotate3_8(a) (leftRotate_8((a), 3))
#define leftRotate4_8(a) (leftRotate_8((a), 4))
#define leftRotate5_8(a) (leftRotate_8((a), 5))
#define leftRotate6_8(a) (leftRotate_8((a), 6))
#define leftRotate7_8(a) (leftRotate_8((a), 7))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1_8(a) (rightRotate_8((a), 1))
#define rightRotate2_8(a) (rightRotate_8((a), 2))
#define rightRotate3_8(a) (rightRotate_8((a), 3))
#define rightRotate4_8(a) (rightRotate_8((a), 4))
#define rightRotate5_8(a) (rightRotate_8((a), 5))
#define rightRotate6_8(a) (rightRotate_8((a), 6))
#define rightRotate7_8(a) (rightRotate_8((a), 7))
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "romulus.h"
#include "internal-skinny128.h"
#include "internal-util.h"
#include <string.h>
aead_cipher_t const romulus_n1_cipher = {
"Romulus-N1",
ROMULUS_KEY_SIZE,
ROMULUS1_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_n1_aead_encrypt,
romulus_n1_aead_decrypt
};
aead_cipher_t const romulus_n2_cipher = {
"Romulus-N2",
ROMULUS_KEY_SIZE,
ROMULUS2_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_n2_aead_encrypt,
romulus_n2_aead_decrypt
};
aead_cipher_t const romulus_n3_cipher = {
"Romulus-N3",
ROMULUS_KEY_SIZE,
ROMULUS3_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_n3_aead_encrypt,
romulus_n3_aead_decrypt
};
aead_cipher_t const romulus_m1_cipher = {
"Romulus-M1",
ROMULUS_KEY_SIZE,
ROMULUS1_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_m1_aead_encrypt,
romulus_m1_aead_decrypt
};
aead_cipher_t const romulus_m2_cipher = {
"Romulus-M2",
ROMULUS_KEY_SIZE,
ROMULUS2_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_m2_aead_encrypt,
romulus_m2_aead_decrypt
};
aead_cipher_t const romulus_m3_cipher = {
"Romulus-M3",
ROMULUS_KEY_SIZE,
ROMULUS3_NONCE_SIZE,
ROMULUS_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
romulus_m3_aead_encrypt,
romulus_m3_aead_decrypt
};
/**
* \brief Limit on the number of bytes of message or associated data (128MB).
*
* Romulus-N1 and Romulus-M1 use a 56-bit block counter which allows for
* payloads well into the petabyte range. It is unlikely that an embedded
* device will have that much memory to store a contiguous packet!
*
* Romulus-N2 and Romulus-M2 use a 48-bit block counter but the upper
* 24 bits are difficult to modify in the key schedule. So we only
* update the low 24 bits and leave the high 24 bits fixed.
*
* Romulus-N3 and Romulus-M3 use a 24-bit block counter.
*
* For all algorithms, we limit the block counter to 2^23 so that the block
* counter can never exceed 2^24 - 1.
*/
#define ROMULUS_DATA_LIMIT \
((unsigned long long)((1ULL << 23) * SKINNY_128_BLOCK_SIZE))
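/* The arithmetic behind the limit above: (1ULL << 23) blocks of
 * SKINNY_128_BLOCK_SIZE (16) bytes each gives 2^27 bytes, i.e. 128MB of
 * message or associated data. */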
/**
* \brief Initializes the key schedule for Romulus-N1 or Romulus-M1.
*
* \param ks Points to the key schedule to initialize.
* \param k Points to the 16 bytes of the key.
* \param npub Points to the 16 bytes of the nonce. May be NULL
* if the nonce will be updated on the fly.
*/
static void romulus1_init
(skinny_128_384_key_schedule_t *ks,
const unsigned char *k, const unsigned char *npub)
{
unsigned char TK[48];
TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */
memset(TK + 1, 0, 15);
if (npub)
memcpy(TK + 16, npub, 16);
else
memset(TK + 16, 0, 16);
memcpy(TK + 32, k, 16);
skinny_128_384_init(ks, TK);
}
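/* Tweakey layout assembled above, as implied by the helpers that follow:
 * TK1 (bytes 0..15) holds the 56-bit LFSR counter in bytes 0..6 and the
 * domain separator in byte 7, TK2 (bytes 16..31) holds the 128-bit nonce,
 * and TK3 (bytes 32..47) holds the 128-bit key. */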
/**
* \brief Initializes the key schedule for Romulus-N2 or Romulus-M2.
*
* \param ks Points to the key schedule to initialize.
* \param k Points to the 16 bytes of the key.
* \param npub Points to the 12 bytes of the nonce. May be NULL
* if the nonce will be updated on the fly.
*/
static void romulus2_init
(skinny_128_384_key_schedule_t *ks,
const unsigned char *k, const unsigned char *npub)
{
unsigned char TK[48];
TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */
if (npub) {
TK[1] = TK[2] = TK[3] = 0;
memcpy(TK + 4, npub, 12);
} else {
memset(TK + 1, 0, 15);
}
memcpy(TK + 16, k, 16);
TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */
memset(TK + 33, 0, 15);
skinny_128_384_init(ks, TK);
}
/**
* \brief Initializes the key schedule for Romulus-N3 or Romulus-M3.
*
* \param ks Points to the key schedule to initialize.
* \param k Points to the 16 bytes of the key.
* \param npub Points to the 12 bytes of the nonce. May be NULL
* if the nonce will be updated on the fly.
*/
static void romulus3_init
(skinny_128_256_key_schedule_t *ks,
const unsigned char *k, const unsigned char *npub)
{
unsigned char TK[32];
TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */
if (npub) {
TK[1] = TK[2] = TK[3] = 0;
memcpy(TK + 4, npub, 12);
} else {
memset(TK + 1, 0, 15);
}
memcpy(TK + 16, k, 16);
skinny_128_256_init(ks, TK);
}
/**
* \brief Sets the domain separation value for Romulus-N1 and M1.
*
* \param ks The key schedule to set the domain separation value into.
* \param domain The domain separation value.
*/
#define romulus1_set_domain(ks, domain) ((ks)->TK1[7] = (domain))
/**
* \brief Sets the domain separation value for Romulus-N2 and M2.
*
* \param ks The key schedule to set the domain separation value into.
* \param domain The domain separation value.
*/
#define romulus2_set_domain(ks, domain) ((ks)->TK1[3] = (domain))
/**
* \brief Sets the domain separation value for Romulus-N3 and M3.
*
* \param ks The key schedule to set the domain separation value into.
* \param domain The domain separation value.
*/
#define romulus3_set_domain(ks, domain) ((ks)->TK1[3] = (domain))
/**
* \brief Updates the 56-bit LFSR block counter for Romulus-N1 and M1.
*
* \param TK1 Points to the TK1 part of the key schedule containing the LFSR.
*/
STATIC_INLINE void romulus1_update_counter(uint8_t TK1[16])
{
uint8_t mask = (uint8_t)(((int8_t)(TK1[6])) >> 7);
TK1[6] = (TK1[6] << 1) | (TK1[5] >> 7);
TK1[5] = (TK1[5] << 1) | (TK1[4] >> 7);
TK1[4] = (TK1[4] << 1) | (TK1[3] >> 7);
TK1[3] = (TK1[3] << 1) | (TK1[2] >> 7);
TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7);
TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7);
TK1[0] = (TK1[0] << 1) ^ (mask & 0x95);
}
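/* Note on the update above: the 56-bit counter is stored little-endian in
 * TK1[0..6]; each call shifts it left by one bit and, when the bit shifted
 * out of TK1[6] was set, XORs the feedback constant 0x95 into the low byte
 * to realize the LFSR step. */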
/**
* \brief Updates the low 24 bits of the LFSR block counter for Romulus-N2 or M2.
*
* \param TK1 Points to the TK1 part of the key schedule containing the LFSR.
*
* For Romulus-N2 and Romulus-M2 this will only update the low 24 bits of
* the 48-bit LFSR. The high 24 bits are fixed due to ROMULUS_DATA_LIMIT.
*/
STATIC_INLINE void romulus2_update_counter(uint8_t TK1[16])
{
uint8_t mask = (uint8_t)(((int8_t)(TK1[2])) >> 7);
TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7);
TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7);
TK1[0] = (TK1[0] << 1) ^ (mask & 0x1B);
}
/**
* \brief Updates the 24-bit LFSR block counter for Romulus-N3 or M3.
*
* \param TK1 Points to the TK1 part of the key schedule containing the LFSR.
*/
#define romulus3_update_counter(TK1) romulus2_update_counter((TK1))
/**
* \brief Process the associated data for Romulus-N1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
*/
static void romulus_n1_process_ad
(skinny_128_384_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen)
{
unsigned char temp;
/* Handle the special case of no associated data */
if (adlen == 0) {
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x1A);
skinny_128_384_encrypt_tk2(ks, S, S, npub);
return;
}
/* Process all double blocks except the last */
romulus1_set_domain(ks, 0x08);
while (adlen > 32) {
romulus1_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
skinny_128_384_encrypt_tk2(ks, S, S, ad + 16);
romulus1_update_counter(ks->TK1);
ad += 32;
adlen -= 32;
}
/* Pad and process the left-over blocks */
romulus1_update_counter(ks->TK1);
temp = (unsigned)adlen;
if (temp == 32) {
/* Left-over complete double block */
lw_xor_block(S, ad, 16);
skinny_128_384_encrypt_tk2(ks, S, S, ad + 16);
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x18);
} else if (temp > 16) {
/* Left-over partial double block */
unsigned char pad[16];
temp -= 16;
lw_xor_block(S, ad, 16);
memcpy(pad, ad + 16, temp);
memset(pad + temp, 0, 15 - temp);
pad[15] = temp;
skinny_128_384_encrypt_tk2(ks, S, S, pad);
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x1A);
} else if (temp == 16) {
/* Left-over complete single block */
lw_xor_block(S, ad, temp);
romulus1_set_domain(ks, 0x18);
} else {
/* Left-over partial single block */
lw_xor_block(S, ad, temp);
S[15] ^= temp;
romulus1_set_domain(ks, 0x1A);
}
skinny_128_384_encrypt_tk2(ks, S, S, npub);
}
/**
* \brief Process the associated data for Romulus-N2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
*/
static void romulus_n2_process_ad
(skinny_128_384_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen)
{
unsigned char temp;
/* Handle the special case of no associated data */
if (adlen == 0) {
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x5A);
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all double blocks except the last */
romulus2_set_domain(ks, 0x48);
while (adlen > 28) {
romulus2_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
ad += 28;
adlen -= 28;
}
/* Pad and process the left-over blocks */
romulus2_update_counter(ks->TK1);
temp = (unsigned)adlen;
if (temp == 28) {
/* Left-over complete double block */
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x58);
} else if (temp > 16) {
/* Left-over partial double block */
temp -= 16;
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp);
ks->TK1[15] = temp;
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x5A);
} else if (temp == 16) {
/* Left-over complete single block */
lw_xor_block(S, ad, temp);
romulus2_set_domain(ks, 0x58);
} else {
/* Left-over partial single block */
lw_xor_block(S, ad, temp);
S[15] ^= temp;
romulus2_set_domain(ks, 0x5A);
}
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Process the associated data for Romulus-N3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
*/
static void romulus_n3_process_ad
(skinny_128_256_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen)
{
unsigned char temp;
/* Handle the special case of no associated data */
if (adlen == 0) {
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x9A);
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_256_encrypt(ks, S, S);
return;
}
/* Process all double blocks except the last */
romulus3_set_domain(ks, 0x88);
while (adlen > 28) {
romulus3_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
ad += 28;
adlen -= 28;
}
/* Pad and process the left-over blocks */
romulus3_update_counter(ks->TK1);
temp = (unsigned)adlen;
if (temp == 28) {
/* Left-over complete double block */
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x98);
} else if (temp > 16) {
/* Left-over partial double block */
temp -= 16;
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp);
ks->TK1[15] = temp;
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x9A);
} else if (temp == 16) {
/* Left-over complete single block */
lw_xor_block(S, ad, temp);
romulus3_set_domain(ks, 0x98);
} else {
/* Left-over partial single block */
lw_xor_block(S, ad, temp);
S[15] ^= temp;
romulus3_set_domain(ks, 0x9A);
}
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_256_encrypt(ks, S, S);
}
/**
* \brief Determine the domain separation value to use on the last
* block of the associated data processing.
*
* \param adlen Length of the associated data in bytes.
* \param mlen Length of the message in bytes.
* \param t Size of the second half of a double block; 12 or 16.
*
* \return The domain separation bits to use to finalize the last block.
*/
static uint8_t romulus_m_final_ad_domain
(unsigned long long adlen, unsigned long long mlen, unsigned t)
{
uint8_t domain = 0;
unsigned split = 16U;
unsigned leftover;
/* Determine which domain bits we need based on the length of the ad */
if (adlen == 0) {
/* No associated data, so only 1 block with padding */
domain ^= 0x02;
split = t;
} else {
/* Even or odd associated data length? */
leftover = (unsigned)(adlen % (16U + t));
if (leftover == 0) {
/* Even with a full double block at the end */
domain ^= 0x08;
} else if (leftover < split) {
/* Odd with a partial single block at the end */
domain ^= 0x02;
split = t;
} else if (leftover > split) {
/* Even with a partial double block at the end */
domain ^= 0x0A;
} else {
/* Odd with a full single block at the end */
split = t;
}
}
/* Determine which domain bits we need based on the length of the message */
if (mlen == 0) {
/* No message, so only 1 block with padding */
domain ^= 0x01;
} else {
/* Even or odd message length? */
leftover = (unsigned)(mlen % (16U + t));
if (leftover == 0) {
/* Even with a full double block at the end */
domain ^= 0x04;
} else if (leftover < split) {
/* Odd with a partial single block at the end */
domain ^= 0x01;
} else if (leftover > split) {
/* Even with a partial double block at the end */
domain ^= 0x05;
}
}
return domain;
}
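/* Worked example of the computation above (values chosen for illustration):
 * with t = 16, adlen = 20 and mlen = 0, the associated data leftover is
 * 20 % 32 = 20 > 16, which contributes 0x0A, and the empty message
 * contributes 0x01, so the function returns 0x0B. */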
/**
* \brief Process the associated data and message for Romulus-M1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
* \param m Points to the message plaintext.
* \param mlen Length of the message plaintext.
*/
static void romulus_m1_process_ad
(skinny_128_384_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *m, unsigned long long mlen)
{
unsigned char pad[16];
uint8_t final_domain = 0x30;
unsigned temp;
/* Determine the domain separator to use on the final block */
final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 16);
/* Process all associated data double blocks except the last */
romulus1_set_domain(ks, 0x28);
while (adlen > 32) {
romulus1_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
skinny_128_384_encrypt_tk2(ks, S, S, ad + 16);
romulus1_update_counter(ks->TK1);
ad += 32;
adlen -= 32;
}
/* Process the last associated data double block */
temp = (unsigned)adlen;
if (temp == 32) {
/* Last associated data double block is full */
romulus1_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
skinny_128_384_encrypt_tk2(ks, S, S, ad + 16);
romulus1_update_counter(ks->TK1);
} else if (temp > 16) {
/* Last associated data double block is partial */
temp -= 16;
romulus1_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(pad, ad + 16, temp);
memset(pad + temp, 0, sizeof(pad) - temp - 1);
pad[sizeof(pad) - 1] = (unsigned char)temp;
skinny_128_384_encrypt_tk2(ks, S, S, pad);
romulus1_update_counter(ks->TK1);
} else {
/* Last associated data block is a single block that needs to be
* combined with the first block of the message payload */
romulus1_set_domain(ks, 0x2C);
romulus1_update_counter(ks->TK1);
if (temp == 16) {
lw_xor_block(S, ad, 16);
} else {
lw_xor_block(S, ad, temp);
S[15] ^= (unsigned char)temp;
}
if (mlen > 16) {
skinny_128_384_encrypt_tk2(ks, S, S, m);
romulus1_update_counter(ks->TK1);
m += 16;
mlen -= 16;
} else if (mlen == 16) {
skinny_128_384_encrypt_tk2(ks, S, S, m);
m += 16;
mlen -= 16;
} else {
temp = (unsigned)mlen;
memcpy(pad, m, temp);
memset(pad + temp, 0, sizeof(pad) - temp - 1);
pad[sizeof(pad) - 1] = (unsigned char)temp;
skinny_128_384_encrypt_tk2(ks, S, S, pad);
mlen = 0;
}
}
/* Process all message double blocks except the last */
romulus1_set_domain(ks, 0x2C);
while (mlen > 32) {
romulus1_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
skinny_128_384_encrypt_tk2(ks, S, S, m + 16);
romulus1_update_counter(ks->TK1);
m += 32;
mlen -= 32;
}
/* Process the last message double block */
temp = (unsigned)mlen;
if (temp == 32) {
/* Last message double block is full */
romulus1_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
skinny_128_384_encrypt_tk2(ks, S, S, m + 16);
} else if (temp > 16) {
/* Last message double block is partial */
temp -= 16;
romulus1_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(pad, m + 16, temp);
memset(pad + temp, 0, sizeof(pad) - temp - 1);
pad[sizeof(pad) - 1] = (unsigned char)temp;
skinny_128_384_encrypt_tk2(ks, S, S, pad);
} else if (temp == 16) {
/* Last message single block is full */
lw_xor_block(S, m, 16);
} else if (temp > 0) {
/* Last message single block is partial */
lw_xor_block(S, m, temp);
S[15] ^= (unsigned char)temp;
}
/* Process the last partial block */
romulus1_set_domain(ks, final_domain);
romulus1_update_counter(ks->TK1);
skinny_128_384_encrypt_tk2(ks, S, S, npub);
}
/**
* \brief Process the associated data and message for Romulus-M2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
* \param m Points to the message plaintext.
* \param mlen Length of the message plaintext.
*/
static void romulus_m2_process_ad
(skinny_128_384_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *m, unsigned long long mlen)
{
uint8_t final_domain = 0x70;
unsigned temp;
/* Determine the domain separator to use on the final block */
final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12);
/* Process all associated data double blocks except the last */
romulus2_set_domain(ks, 0x68);
while (adlen > 28) {
romulus2_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
ad += 28;
adlen -= 28;
}
/* Process the last associated data double block */
temp = (unsigned)adlen;
if (temp == 28) {
/* Last associated data double block is full */
romulus2_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
} else if (temp > 16) {
/* Last associated data double block is partial */
temp -= 16;
romulus2_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
} else {
/* Last associated data block is a single block that needs to be
* combined with the first block of the message payload */
romulus2_set_domain(ks, 0x6C);
romulus2_update_counter(ks->TK1);
if (temp == 16) {
lw_xor_block(S, ad, 16);
} else {
lw_xor_block(S, ad, temp);
S[15] ^= (unsigned char)temp;
}
if (mlen > 12) {
memcpy(ks->TK1 + 4, m, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
m += 12;
mlen -= 12;
} else if (mlen == 12) {
memcpy(ks->TK1 + 4, m, 12);
skinny_128_384_encrypt(ks, S, S);
m += 12;
mlen -= 12;
} else {
temp = (unsigned)mlen;
memcpy(ks->TK1 + 4, m, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_384_encrypt(ks, S, S);
mlen = 0;
}
}
/* Process all message double blocks except the last */
romulus2_set_domain(ks, 0x6C);
while (mlen > 28) {
romulus2_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, 12);
skinny_128_384_encrypt(ks, S, S);
romulus2_update_counter(ks->TK1);
m += 28;
mlen -= 28;
}
/* Process the last message double block */
temp = (unsigned)mlen;
if (temp == 28) {
/* Last message double block is full */
romulus2_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, 12);
skinny_128_384_encrypt(ks, S, S);
} else if (temp > 16) {
/* Last message double block is partial */
temp -= 16;
romulus2_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_384_encrypt(ks, S, S);
} else if (temp == 16) {
/* Last message single block is full */
lw_xor_block(S, m, 16);
} else if (temp > 0) {
/* Last message single block is partial */
lw_xor_block(S, m, temp);
S[15] ^= (unsigned char)temp;
}
/* Process the last partial block */
romulus2_set_domain(ks, final_domain);
romulus2_update_counter(ks->TK1);
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Process the associated data and message for Romulus-M3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param npub Points to the nonce.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
* \param m Points to the message plaintext.
* \param mlen Length of the message plaintext.
*/
static void romulus_m3_process_ad
(skinny_128_256_key_schedule_t *ks,
unsigned char S[16], const unsigned char *npub,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *m, unsigned long long mlen)
{
uint8_t final_domain = 0xB0;
unsigned temp;
/* Determine the domain separator to use on the final block */
final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12);
/* Process all associated data double blocks except the last */
romulus3_set_domain(ks, 0xA8);
while (adlen > 28) {
romulus3_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
ad += 28;
adlen -= 28;
}
/* Process the last associated data double block */
temp = (unsigned)adlen;
if (temp == 28) {
/* Last associated data double block is full */
romulus3_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
} else if (temp > 16) {
/* Last associated data double block is partial */
temp -= 16;
romulus3_update_counter(ks->TK1);
lw_xor_block(S, ad, 16);
memcpy(ks->TK1 + 4, ad + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
} else {
/* Last associated data block is a single block that needs to be
* combined with the first block of the message payload */
romulus3_set_domain(ks, 0xAC);
romulus3_update_counter(ks->TK1);
if (temp == 16) {
lw_xor_block(S, ad, 16);
} else {
lw_xor_block(S, ad, temp);
S[15] ^= (unsigned char)temp;
}
if (mlen > 12) {
memcpy(ks->TK1 + 4, m, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
m += 12;
mlen -= 12;
} else if (mlen == 12) {
memcpy(ks->TK1 + 4, m, 12);
skinny_128_256_encrypt(ks, S, S);
m += 12;
mlen -= 12;
} else {
temp = (unsigned)mlen;
memcpy(ks->TK1 + 4, m, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_256_encrypt(ks, S, S);
mlen = 0;
}
}
/* Process all message double blocks except the last */
romulus3_set_domain(ks, 0xAC);
while (mlen > 28) {
romulus3_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, 12);
skinny_128_256_encrypt(ks, S, S);
romulus3_update_counter(ks->TK1);
m += 28;
mlen -= 28;
}
/* Process the last message double block */
temp = (unsigned)mlen;
if (temp == 28) {
/* Last message double block is full */
romulus3_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, 12);
skinny_128_256_encrypt(ks, S, S);
} else if (temp > 16) {
/* Last message double block is partial */
temp -= 16;
romulus3_update_counter(ks->TK1);
lw_xor_block(S, m, 16);
memcpy(ks->TK1 + 4, m + 16, temp);
memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1);
ks->TK1[15] = (unsigned char)temp;
skinny_128_256_encrypt(ks, S, S);
} else if (temp == 16) {
/* Last message single block is full */
lw_xor_block(S, m, 16);
} else if (temp > 0) {
/* Last message single block is partial */
lw_xor_block(S, m, temp);
S[15] ^= (unsigned char)temp;
}
/* Process the last partial block */
romulus3_set_domain(ks, final_domain);
romulus3_update_counter(ks->TK1);
memcpy(ks->TK1 + 4, npub, 12);
skinny_128_256_encrypt(ks, S, S);
}
/**
* \brief Applies the Romulus rho function.
*
* \param S The rolling Romulus state.
* \param C Ciphertext message output block.
* \param M Plaintext message input block.
*/
STATIC_INLINE void romulus_rho
(unsigned char S[16], unsigned char C[16], const unsigned char M[16])
{
unsigned index;
for (index = 0; index < 16; ++index) {
unsigned char s = S[index];
unsigned char m = M[index];
S[index] ^= m;
C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
}
}
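/* The per-byte feedback used above is G(s) = (s >> 1) ^ (s & 0x80) ^ (s << 7):
 * output bits 0..6 are bits 1..7 of s, and output bit 7 is bit 7 XOR bit 0
 * of s.  rho folds the message block into the state and masks the output
 * block with G applied to the previous state bytes. */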
/**
* \brief Applies the inverse of the Romulus rho function.
*
* \param S The rolling Romulus state.
* \param M Plaintext message output block.
* \param C Ciphertext message input block.
*/
STATIC_INLINE void romulus_rho_inverse
(unsigned char S[16], unsigned char M[16], const unsigned char C[16])
{
unsigned index;
for (index = 0; index < 16; ++index) {
unsigned char s = S[index];
unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
S[index] ^= m;
M[index] = m;
}
}
/**
* \brief Applies the Romulus rho function to a short block.
*
* \param S The rolling Romulus state.
* \param C Ciphertext message output block.
* \param M Plaintext message input block.
* \param len Length of the short block, must be less than 16.
*/
STATIC_INLINE void romulus_rho_short
(unsigned char S[16], unsigned char C[16],
const unsigned char M[16], unsigned len)
{
unsigned index;
for (index = 0; index < len; ++index) {
unsigned char s = S[index];
unsigned char m = M[index];
S[index] ^= m;
C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
}
S[15] ^= (unsigned char)len; /* Padding */
}
/**
* \brief Applies the inverse of the Romulus rho function to a short block.
*
* \param S The rolling Romulus state.
* \param M Plaintext message output block.
* \param C Ciphertext message input block.
* \param len Length of the short block, must be less than 16.
*/
STATIC_INLINE void romulus_rho_inverse_short
(unsigned char S[16], unsigned char M[16],
const unsigned char C[16], unsigned len)
{
unsigned index;
for (index = 0; index < len; ++index) {
unsigned char s = S[index];
unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
S[index] ^= m;
M[index] = m;
}
S[15] ^= (unsigned char)len; /* Padding */
}
/**
* \brief Encrypts a plaintext message with Romulus-N1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n1_encrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no plaintext */
if (mlen == 0) {
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x15);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus1_set_domain(ks, 0x04);
while (mlen > 16) {
romulus_rho(S, c, m);
romulus1_update_counter(ks->TK1);
skinny_128_384_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus1_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_short(S, c, m, temp);
romulus1_set_domain(ks, 0x15);
} else {
romulus_rho(S, c, m);
romulus1_set_domain(ks, 0x14);
}
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Decrypts a ciphertext message with Romulus-N1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n1_decrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no ciphertext */
if (mlen == 0) {
romulus1_update_counter(ks->TK1);
romulus1_set_domain(ks, 0x15);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus1_set_domain(ks, 0x04);
while (mlen > 16) {
romulus_rho_inverse(S, m, c);
romulus1_update_counter(ks->TK1);
skinny_128_384_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus1_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_inverse_short(S, m, c, temp);
romulus1_set_domain(ks, 0x15);
} else {
romulus_rho_inverse(S, m, c);
romulus1_set_domain(ks, 0x14);
}
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Encrypts a plaintext message with Romulus-N2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n2_encrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no plaintext */
if (mlen == 0) {
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x55);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus2_set_domain(ks, 0x44);
while (mlen > 16) {
romulus_rho(S, c, m);
romulus2_update_counter(ks->TK1);
skinny_128_384_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus2_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_short(S, c, m, temp);
romulus2_set_domain(ks, 0x55);
} else {
romulus_rho(S, c, m);
romulus2_set_domain(ks, 0x54);
}
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Decrypts a ciphertext message with Romulus-N2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n2_decrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no ciphertext */
if (mlen == 0) {
romulus2_update_counter(ks->TK1);
romulus2_set_domain(ks, 0x55);
skinny_128_384_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus2_set_domain(ks, 0x44);
while (mlen > 16) {
romulus_rho_inverse(S, m, c);
romulus2_update_counter(ks->TK1);
skinny_128_384_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus2_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_inverse_short(S, m, c, temp);
romulus2_set_domain(ks, 0x55);
} else {
romulus_rho_inverse(S, m, c);
romulus2_set_domain(ks, 0x54);
}
skinny_128_384_encrypt(ks, S, S);
}
/**
* \brief Encrypts a plaintext message with Romulus-N3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n3_encrypt
(skinny_128_256_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no plaintext */
if (mlen == 0) {
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x95);
skinny_128_256_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus3_set_domain(ks, 0x84);
while (mlen > 16) {
romulus_rho(S, c, m);
romulus3_update_counter(ks->TK1);
skinny_128_256_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus3_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_short(S, c, m, temp);
romulus3_set_domain(ks, 0x95);
} else {
romulus_rho(S, c, m);
romulus3_set_domain(ks, 0x94);
}
skinny_128_256_encrypt(ks, S, S);
}
/**
* \brief Decrypts a ciphertext message with Romulus-N3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_n3_decrypt
(skinny_128_256_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
unsigned temp;
/* Handle the special case of no ciphertext */
if (mlen == 0) {
romulus3_update_counter(ks->TK1);
romulus3_set_domain(ks, 0x95);
skinny_128_256_encrypt(ks, S, S);
return;
}
/* Process all blocks except the last */
romulus3_set_domain(ks, 0x84);
while (mlen > 16) {
romulus_rho_inverse(S, m, c);
romulus3_update_counter(ks->TK1);
skinny_128_256_encrypt(ks, S, S);
c += 16;
m += 16;
mlen -= 16;
}
/* Pad and process the last block */
temp = (unsigned)mlen;
romulus3_update_counter(ks->TK1);
if (temp < 16) {
romulus_rho_inverse_short(S, m, c, temp);
romulus3_set_domain(ks, 0x95);
} else {
romulus_rho_inverse(S, m, c);
romulus3_set_domain(ks, 0x94);
}
skinny_128_256_encrypt(ks, S, S);
}
/**
* \brief Encrypts a plaintext message with Romulus-M1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m1_encrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
/* Process all blocks except the last */
romulus1_set_domain(ks, 0x24);
while (mlen > 16) {
skinny_128_384_encrypt(ks, S, S);
romulus_rho(S, c, m);
romulus1_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_384_encrypt(ks, S, S);
romulus_rho_short(S, c, m, (unsigned)mlen);
}
/**
* \brief Decrypts a ciphertext message with Romulus-M1.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m1_decrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
/* Process all blocks except the last */
romulus1_set_domain(ks, 0x24);
while (mlen > 16) {
skinny_128_384_encrypt(ks, S, S);
romulus_rho_inverse(S, m, c);
romulus1_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_384_encrypt(ks, S, S);
romulus_rho_inverse_short(S, m, c, (unsigned)mlen);
}
/**
* \brief Encrypts a plaintext message with Romulus-M2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m2_encrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
/* Process all blocks except the last */
romulus2_set_domain(ks, 0x64);
while (mlen > 16) {
skinny_128_384_encrypt(ks, S, S);
romulus_rho(S, c, m);
romulus2_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_384_encrypt(ks, S, S);
romulus_rho_short(S, c, m, (unsigned)mlen);
}
/**
* \brief Decrypts a ciphertext message with Romulus-M2.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m2_decrypt
(skinny_128_384_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
/* Process all blocks except the last */
romulus2_set_domain(ks, 0x64);
while (mlen > 16) {
skinny_128_384_encrypt(ks, S, S);
romulus_rho_inverse(S, m, c);
romulus2_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_384_encrypt(ks, S, S);
romulus_rho_inverse_short(S, m, c, (unsigned)mlen);
}
/**
* \brief Encrypts a plaintext message with Romulus-M3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the buffer containing the plaintext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m3_encrypt
(skinny_128_256_key_schedule_t *ks, unsigned char S[16],
unsigned char *c, const unsigned char *m, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
/* Process all blocks except the last */
romulus3_set_domain(ks, 0xA4);
while (mlen > 16) {
skinny_128_256_encrypt(ks, S, S);
romulus_rho(S, c, m);
romulus3_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_256_encrypt(ks, S, S);
romulus_rho_short(S, c, m, (unsigned)mlen);
}
/**
* \brief Decrypts a ciphertext message with Romulus-M3.
*
* \param ks Points to the key schedule.
* \param S The rolling Romulus state.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the buffer containing the ciphertext.
* \param mlen Length of the plaintext in bytes.
*/
static void romulus_m3_decrypt
(skinny_128_256_key_schedule_t *ks, unsigned char S[16],
unsigned char *m, const unsigned char *c, unsigned long long mlen)
{
/* Nothing to do if the message is empty */
if (!mlen)
return;
/* Process all blocks except the last */
romulus3_set_domain(ks, 0xA4);
while (mlen > 16) {
skinny_128_256_encrypt(ks, S, S);
romulus_rho_inverse(S, m, c);
romulus3_update_counter(ks->TK1);
c += 16;
m += 16;
mlen -= 16;
}
/* Handle the last block */
skinny_128_256_encrypt(ks, S, S);
romulus_rho_inverse_short(S, m, c, (unsigned)mlen);
}
/**
* \brief Generates the authentication tag from the rolling Romulus state.
*
* \param T Buffer to receive the generated tag; can be the same as S.
* \param S The rolling Romulus state.
*/
STATIC_INLINE void romulus_generate_tag
(unsigned char T[16], const unsigned char S[16])
{
unsigned index;
for (index = 0; index < 16; ++index) {
unsigned char s = S[index];
T[index] = (s >> 1) ^ (s & 0x80) ^ (s << 7);
}
}
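/* The tag is simply the final state pushed through the same per-byte
 * feedback function G as used in romulus_rho above. */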
int romulus_n1_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus1_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n1_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus1_init(&ks, k, npub);
/* Encrypt the plaintext to produce the ciphertext */
romulus_n1_encrypt(&ks, S, c, m, mlen);
/* Generate the authentication tag */
romulus_generate_tag(c + mlen, S);
return 0;
}
int romulus_n1_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus1_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n1_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus1_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext */
clen -= ROMULUS_TAG_SIZE;
romulus_n1_decrypt(&ks, S, m, c, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_n2_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus2_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n2_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus2_init(&ks, k, npub);
/* Encrypt the plaintext to produce the ciphertext */
romulus_n2_encrypt(&ks, S, c, m, mlen);
/* Generate the authentication tag */
romulus_generate_tag(c + mlen, S);
return 0;
}
int romulus_n2_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus2_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n2_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus2_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext */
clen -= ROMULUS_TAG_SIZE;
romulus_n2_decrypt(&ks, S, m, c, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_n3_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus3_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n3_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus3_init(&ks, k, npub);
/* Encrypt the plaintext to produce the ciphertext */
romulus_n3_encrypt(&ks, S, c, m, mlen);
/* Generate the authentication tag */
romulus_generate_tag(c + mlen, S);
return 0;
}
int romulus_n3_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus3_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_n3_process_ad(&ks, S, npub, ad, adlen);
/* Re-initialize the key schedule with the key and nonce */
romulus3_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext */
clen -= ROMULUS_TAG_SIZE;
romulus_n3_decrypt(&ks, S, m, c, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_m1_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus1_init(&ks, k, 0);
/* Process the associated data and the plaintext message */
memset(S, 0, sizeof(S));
romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, mlen);
/* Generate the authentication tag, which is also the initialization
* vector for the encryption portion of the packet processing */
romulus_generate_tag(S, S);
memcpy(c + mlen, S, ROMULUS_TAG_SIZE);
/* Re-initialize the key schedule with the key and nonce */
romulus1_init(&ks, k, npub);
/* Encrypt the plaintext to produce the ciphertext */
romulus_m1_encrypt(&ks, S, c, m, mlen);
return 0;
}
int romulus_m1_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and nonce */
romulus1_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext, using the
* authentication tag as the initialization vector for decryption */
clen -= ROMULUS_TAG_SIZE;
memcpy(S, c + clen, ROMULUS_TAG_SIZE);
romulus_m1_decrypt(&ks, S, m, c, clen);
/* Re-initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus1_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_m2_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus2_init(&ks, k, 0);
/* Process the associated data and the plaintext message */
memset(S, 0, sizeof(S));
romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, mlen);
/* Generate the authentication tag, which is also the initialization
* vector for the encryption portion of the packet processing */
romulus_generate_tag(S, S);
memcpy(c + mlen, S, ROMULUS_TAG_SIZE);
/* Re-initialize the key schedule with the key and nonce */
romulus2_init(&ks, k, npub);
/* Encrypt the plaintext to produce the ciphertext */
romulus_m2_encrypt(&ks, S, c, m, mlen);
return 0;
}
int romulus_m2_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and nonce */
romulus2_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext, using the
* authentication tag as the initialization vector for decryption */
clen -= ROMULUS_TAG_SIZE;
memcpy(S, c + clen, ROMULUS_TAG_SIZE);
romulus_m2_decrypt(&ks, S, m, c, clen);
/* Re-initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus2_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
int romulus_m3_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT)
return -2;
/* Initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus3_init(&ks, k, 0);
/* Process the associated data and the plaintext message */
memset(S, 0, sizeof(S));
romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, mlen);
/* Generate the authentication tag, which is also the initialization
* vector for the encryption portion of the packet processing */
romulus_generate_tag(S, S);
memcpy(c + mlen, S, ROMULUS_TAG_SIZE);
/* Re-initialize the key schedule with the key and nonce */
romulus3_init(&ks, k, npub);
/* Encrypt the plaintext to produce the ciphertext */
romulus_m3_encrypt(&ks, S, c, m, mlen);
return 0;
}
int romulus_m3_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char S[16];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < ROMULUS_TAG_SIZE)
return -1;
*mlen = clen - ROMULUS_TAG_SIZE;
/* Validate the length of the associated data and message */
if (adlen > ROMULUS_DATA_LIMIT ||
clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE))
return -2;
/* Initialize the key schedule with the key and nonce */
romulus3_init(&ks, k, npub);
/* Decrypt the ciphertext to produce the plaintext, using the
* authentication tag as the initialization vector for decryption */
clen -= ROMULUS_TAG_SIZE;
memcpy(S, c + clen, ROMULUS_TAG_SIZE);
romulus_m3_decrypt(&ks, S, m, c, clen);
/* Re-initialize the key schedule with the key and no nonce. Associated
* data processing varies the nonce from block to block */
romulus3_init(&ks, k, 0);
/* Process the associated data */
memset(S, 0, sizeof(S));
romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, clen);
/* Check the authentication tag */
romulus_generate_tag(S, S);
return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE);
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LWCRYPTO_ROMULUS_H
#define LWCRYPTO_ROMULUS_H
#include "aead-common.h"
/**
* \file romulus.h
* \brief Romulus authenticated encryption algorithm family.
*
* Romulus is a family of authenticated encryption algorithms that
* are built around the SKINNY-128 tweakable block cipher. There
* are six members in the family:
*
* \li Romulus-N1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher. This is the
* primary member of the family.
* \li Romulus-N2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher.
* \li Romulus-N3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-256 tweakable block cipher.
* \li Romulus-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher.
* \li Romulus-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher.
* \li Romulus-M3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-256 tweakable block cipher.
*
* The Romulus-M variants are resistant to nonce reuse as long as the
* combination of the associated data and plaintext is unique. If the
* same associated data and plaintext are reused under the same nonce,
* then the scheme will leak that the same plaintext has been sent for a
* second time but will not reveal the plaintext itself.
*
* References: https://romulusae.github.io/romulus/
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief Size of the key for all Romulus family members.
*/
#define ROMULUS_KEY_SIZE 16
/**
* \brief Size of the authentication tag for all Romulus family members.
*/
#define ROMULUS_TAG_SIZE 16
/**
* \brief Size of the nonce for Romulus-N1 and Romulus-M1.
*/
#define ROMULUS1_NONCE_SIZE 16
/**
* \brief Size of the nonce for Romulus-N2 and Romulus-M2.
*/
#define ROMULUS2_NONCE_SIZE 12
/**
* \brief Size of the nonce for Romulus-N3 and Romulus-M3.
*/
#define ROMULUS3_NONCE_SIZE 12
/**
* \brief Meta-information block for the Romulus-N1 cipher.
*/
extern aead_cipher_t const romulus_n1_cipher;
/**
* \brief Meta-information block for the Romulus-N2 cipher.
*/
extern aead_cipher_t const romulus_n2_cipher;
/**
* \brief Meta-information block for the Romulus-N3 cipher.
*/
extern aead_cipher_t const romulus_n3_cipher;
/**
* \brief Meta-information block for the Romulus-M1 cipher.
*/
extern aead_cipher_t const romulus_m1_cipher;
/**
* \brief Meta-information block for the Romulus-M2 cipher.
*/
extern aead_cipher_t const romulus_m2_cipher;
/**
* \brief Meta-information block for the Romulus-M3 cipher.
*/
extern aead_cipher_t const romulus_m3_cipher;
/**
* \brief Encrypts and authenticates a packet with Romulus-N1.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_n1_aead_decrypt()
*/
int romulus_n1_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-N1.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_n1_aead_encrypt()
*/
int romulus_n1_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
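/*
 * Usage sketch (illustrative only, not taken from the original sources):
 * a minimal encrypt/decrypt round trip with Romulus-N1.  The buffer names
 * and the zero-filled key/nonce are placeholders for this example only;
 * real callers must supply a fresh nonce for every packet under a key.
 *
 *     unsigned char key[ROMULUS_KEY_SIZE] = {0};
 *     unsigned char nonce[ROMULUS1_NONCE_SIZE] = {0};
 *     unsigned char msg[32] = {0};
 *     unsigned char ct[sizeof(msg) + ROMULUS_TAG_SIZE];
 *     unsigned char pt[sizeof(msg)];
 *     unsigned long long ctlen, ptlen;
 *
 *     // Encrypt: "ct" receives the ciphertext followed by the 16-byte tag.
 *     romulus_n1_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
 *                             0, 0, 0, nonce, key);
 *
 *     // Decrypt: returns 0 and fills "pt"/"ptlen" only if the tag verifies.
 *     if (romulus_n1_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
 *                                 0, 0, nonce, key) != 0) {
 *         // Authentication failed; "pt" has been zeroed.
 *     }
 */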
/**
* \brief Encrypts and authenticates a packet with Romulus-N2.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_n2_aead_decrypt()
*/
int romulus_n2_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-N2.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_n2_aead_encrypt()
*/
int romulus_n2_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with Romulus-N3.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_n3_aead_decrypt()
*/
int romulus_n3_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-N3.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_n3_aead_encrypt()
*/
int romulus_n3_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with Romulus-M1.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_m1_aead_decrypt()
*/
int romulus_m1_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-M1.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_m1_aead_encrypt()
*/
int romulus_m1_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with Romulus-M2.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_m2_aead_decrypt()
*/
int romulus_m2_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-M2.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_m2_aead_encrypt()
*/
int romulus_m2_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with Romulus-M3.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa romulus_m3_aead_decrypt()
*/
int romulus_m3_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with Romulus-M3.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa romulus_m3_aead_encrypt()
*/
int romulus_m3_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
#ifdef __cplusplus
}
#endif
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "aead-common.h"
int aead_check_tag
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned size)
{
/* Set "accum" to -1 if the tags match, or 0 if they don't match */
int accum = 0;
while (size > 0) {
accum |= (*tag1++ ^ *tag2++);
--size;
}
accum = (accum - 1) >> 8;
/* Destroy the plaintext if the tag match failed */
while (plaintext_len > 0) {
*plaintext++ &= accum;
--plaintext_len;
}
/* If "accum" is 0, return -1, otherwise return 0 */
return ~accum;
}
int aead_check_tag_precheck
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned size, int precheck)
{
/* Set "accum" to -1 if the tags match, or 0 if they don't match */
int accum = 0;
while (size > 0) {
accum |= (*tag1++ ^ *tag2++);
--size;
}
accum = ((accum - 1) >> 8) & precheck;
/* Destroy the plaintext if the tag match failed */
while (plaintext_len > 0) {
*plaintext++ &= accum;
--plaintext_len;
}
/* If "accum" is 0, return -1, otherwise return 0 */
return ~accum;
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LWCRYPTO_AEAD_COMMON_H
#define LWCRYPTO_AEAD_COMMON_H
#include <stddef.h>
/**
* \file aead-common.h
* \brief Definitions that are common across AEAD schemes.
*
* AEAD stands for "Authenticated Encryption with Associated Data".
* It is a standard API pattern for securely encrypting and
* authenticating packets of data.
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief Encrypts and authenticates a packet with an AEAD scheme.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - normally not used by AEAD schemes.
* \param npub Points to the public nonce for the packet.
* \param k Points to the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*/
typedef int (*aead_cipher_encrypt_t)
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with an AEAD scheme.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - normally not used by AEAD schemes.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet.
* \param k Points to the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*/
typedef int (*aead_cipher_decrypt_t)
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Hashes a block of input data.
*
* \param out Buffer to receive the hash output.
* \param in Points to the input data to be hashed.
* \param inlen Length of the input data in bytes.
*
* \return Returns zero on success or -1 if there was an error in the
* parameters.
*/
typedef int (*aead_hash_t)
(unsigned char *out, const unsigned char *in, unsigned long long inlen);
/**
* \brief Initializes the state for a hashing operation.
*
* \param state Hash state to be initialized.
*/
typedef void (*aead_hash_init_t)(void *state);
/**
* \brief Updates a hash state with more input data.
*
* \param state Hash state to be updated.
* \param in Points to the input data to be incorporated into the state.
* \param inlen Length of the input data to be incorporated into the state.
*/
typedef void (*aead_hash_update_t)
(void *state, const unsigned char *in, unsigned long long inlen);
/**
* \brief Returns the final hash value from a hashing operation.
*
* \param state Hash state to be finalized.
* \param out Points to the output buffer to receive the hash value.
*/
typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out);
/**
* \brief Absorbs more input data into an XOF state.
*
* \param state XOF state to be updated.
* \param in Points to the input data to be absorbed into the state.
* \param inlen Length of the input data to be absorbed into the state.
*
* \sa ascon_xof_init(), ascon_xof_squeeze()
*/
typedef void (*aead_xof_absorb_t)
(void *state, const unsigned char *in, unsigned long long inlen);
/**
* \brief Squeezes output data from an XOF state.
*
* \param state XOF state to squeeze the output data from.
* \param out Points to the output buffer to receive the squeezed data.
* \param outlen Number of bytes of data to squeeze out of the state.
*/
typedef void (*aead_xof_squeeze_t)
(void *state, unsigned char *out, unsigned long long outlen);
/**
* \brief No special AEAD features.
*/
#define AEAD_FLAG_NONE 0x0000
/**
* \brief The natural byte order of the AEAD cipher is little-endian.
*
* If this flag is not present, then the natural byte order of the
* AEAD cipher should be assumed to be big-endian.
*
* The natural byte order may be useful when formatting packet sequence
* numbers as nonces. The application needs to know whether the sequence
* number should be packed into the leading or trailing bytes of the nonce.
*/
#define AEAD_FLAG_LITTLE_ENDIAN 0x0001
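/*
 * Illustrative sketch (not part of the original sources): packing a 64-bit
 * packet sequence number into a nonce according to the cipher's natural
 * byte order.  The helper name is hypothetical and <string.h> is assumed
 * for memset().
 *
 *     void aead_format_sequence_nonce
 *         (unsigned char *nonce, unsigned nonce_len,
 *          unsigned long long seq, unsigned flags)
 *     {
 *         unsigned i;
 *         memset(nonce, 0, nonce_len);
 *         if (flags & AEAD_FLAG_LITTLE_ENDIAN) {
 *             // Little-endian ciphers: sequence number in the leading bytes.
 *             for (i = 0; i < 8 && i < nonce_len; ++i)
 *                 nonce[i] = (unsigned char)(seq >> (8 * i));
 *         } else {
 *             // Big-endian ciphers: sequence number in the trailing bytes.
 *             for (i = 0; i < 8 && i < nonce_len; ++i)
 *                 nonce[nonce_len - 1 - i] = (unsigned char)(seq >> (8 * i));
 *         }
 *     }
 */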
/**
* \brief Meta-information about an AEAD cipher.
*/
typedef struct
{
const char *name; /**< Name of the cipher */
unsigned key_len; /**< Length of the key in bytes */
unsigned nonce_len; /**< Length of the nonce in bytes */
unsigned tag_len; /**< Length of the tag in bytes */
unsigned flags; /**< Flags for extra features */
aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */
aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */
} aead_cipher_t;
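/*
 * Illustrative sketch (not part of the original sources): driving any AEAD
 * in the suite through its meta-information block.  "encrypt_with" is a
 * hypothetical helper; the caller is assumed to size its key, nonce and
 * output buffers from the key_len, nonce_len and tag_len fields.
 *
 *     int encrypt_with(const aead_cipher_t *cipher,
 *                      unsigned char *c, unsigned long long *clen,
 *                      const unsigned char *m, unsigned long long mlen,
 *                      const unsigned char *npub, const unsigned char *k)
 *     {
 *         // No associated data and no secret nonce in this sketch.
 *         return cipher->encrypt(c, clen, m, mlen, 0, 0, 0, npub, k);
 *     }
 */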
/**
* \brief Meta-information about a hash algorithm that is related to an AEAD.
*
* Regular hash algorithms should provide the "hash", "init", "update",
* and "finalize" functions. Extensible Output Functions (XOF's) should
* proivde the "hash", "init", "absorb", and "squeeze" functions.
*/
typedef struct
{
const char *name; /**< Name of the hash algorithm */
size_t state_size; /**< Size of the incremental state structure */
unsigned hash_len; /**< Length of the hash in bytes */
unsigned flags; /**< Flags for extra features */
aead_hash_t hash; /**< All in one hashing function */
aead_hash_init_t init; /**< Incremental hash/XOF init function */
aead_hash_update_t update; /**< Incremental hash update function */
aead_hash_finalize_t finalize; /**< Incremental hash finalize function */
aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */
aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */
} aead_hash_algorithm_t;
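/*
 * Illustrative sketch (not part of the original sources): incremental
 * hashing through the meta-information block.  "hash_in_two_parts" is a
 * hypothetical helper, <stdlib.h> is assumed for malloc()/free(), and the
 * algorithm is assumed to provide the init/update/finalize functions.
 *
 *     int hash_in_two_parts(const aead_hash_algorithm_t *alg,
 *                           unsigned char *out,
 *                           const unsigned char *p1, unsigned long long len1,
 *                           const unsigned char *p2, unsigned long long len2)
 *     {
 *         void *state = malloc(alg->state_size);
 *         if (!state)
 *             return -1;
 *         alg->init(state);
 *         alg->update(state, p1, len1);
 *         alg->update(state, p2, len2);
 *         alg->finalize(state, out);   // writes alg->hash_len bytes
 *         free(state);
 *         return 0;
 *     }
 */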
/**
* \brief Check an authentication tag in constant time.
*
* \param plaintext Points to the plaintext data.
* \param plaintext_len Length of the plaintext in bytes.
* \param tag1 First tag to compare.
* \param tag2 Second tag to compare.
* \param tag_len Length of the tags in bytes.
*
* \return Returns -1 if the tag check failed or 0 if the check succeeded.
*
* If the tag check fails, then the \a plaintext will also be zeroed to
* prevent it from being used accidentally by the application when the
* ciphertext was invalid.
*/
int aead_check_tag
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned tag_len);
/**
* \brief Check an authentication tag in constant time with a previous check.
*
* \param plaintext Points to the plaintext data.
* \param plaintext_len Length of the plaintext in bytes.
* \param tag1 First tag to compare.
* \param tag2 Second tag to compare.
* \param tag_len Length of the tags in bytes.
* \param precheck Set to -1 if previous check succeeded or 0 if it failed.
*
* \return Returns -1 if the tag check failed or 0 if the check succeeded.
*
* If the tag check fails, then the \a plaintext will also be zeroed to
* prevent it from being used accidentally by the application when the
* ciphertext was invalid.
*
* This version can be used to incorporate other information about the
* correctness of the plaintext into the final result.
*/
int aead_check_tag_precheck
(unsigned char *plaintext, unsigned long long plaintext_len,
const unsigned char *tag1, const unsigned char *tag2,
unsigned tag_len, int precheck);
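/*
 * Illustrative sketch (not part of the original sources): folding an
 * earlier constant-time validity check into the final tag comparison.
 * "header_is_valid_mask" is a hypothetical helper that returns -1 (all
 * bits set) when the header is acceptable and 0 otherwise, so the result
 * can be combined with the tag check without branching on secret data.
 *
 *     int ok_mask = header_is_valid_mask(header);   // -1 or 0
 *     if (aead_check_tag_precheck(plaintext, plaintext_len,
 *                                 computed_tag, received_tag,
 *                                 16, ok_mask) != 0) {
 *         // Either the tag or the earlier check failed;
 *         // the plaintext has been zeroed.
 *     }
 */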
#ifdef __cplusplus
}
#endif
#endif
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
#include "skinny-aead.h"
int crypto_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
return skinny_aead_m1_encrypt
(c, clen, m, mlen, ad, adlen, nsec, npub, k);
}
int crypto_aead_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
return skinny_aead_m1_decrypt
(m, mlen, nsec, c, clen, ad, adlen, npub, k);
}
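/*
 * Usage sketch (illustrative only, not part of the original sources):
 * calling the NIST-style wrapper above.  Buffer names are placeholders;
 * sizes follow the CRYPTO_* constants for this implementation (16-byte
 * key and nonce, 16-byte tag).
 *
 *     unsigned char k[CRYPTO_KEYBYTES] = {0};
 *     unsigned char npub[CRYPTO_NPUBBYTES] = {0};
 *     unsigned char m[64] = {0};
 *     unsigned char c[sizeof(m) + CRYPTO_ABYTES];
 *     unsigned long long clen;
 *
 *     crypto_aead_encrypt(c, &clen, m, sizeof(m), 0, 0, 0, npub, k);
 */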
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal-skinny128.h"
#include "internal-skinnyutil.h"
#include "internal-util.h"
#include <string.h>
#if !defined(__AVR__)
STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk)
{
/* This function is used to fast-forward the TK1 tweak value
* to the value at the end of the key schedule for decryption.
*
* The tweak permutation repeats every 16 rounds, so SKINNY-128-256
* with 48 rounds does not need any fast forwarding applied.
* SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds
* are equivalent to applying the permutation 8 times:
*
* PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12]
*/
uint32_t row0 = tk[0];
uint32_t row1 = tk[1];
uint32_t row2 = tk[2];
uint32_t row3 = tk[3];
tk[0] = ((row1 >> 8) & 0x0000FFFFU) |
((row0 >> 8) & 0x00FF0000U) |
((row0 << 8) & 0xFF000000U);
tk[1] = ((row1 >> 24) & 0x000000FFU) |
((row0 << 8) & 0x00FFFF00U) |
((row1 << 24) & 0xFF000000U);
tk[2] = ((row3 >> 8) & 0x0000FFFFU) |
((row2 >> 8) & 0x00FF0000U) |
((row2 << 8) & 0xFF000000U);
tk[3] = ((row3 >> 24) & 0x000000FFU) |
((row2 << 8) & 0x00FFFF00U) |
((row3 << 24) & 0xFF000000U);
}
void skinny_128_384_init
(skinny_128_384_key_schedule_t *ks, const unsigned char key[48])
{
#if !SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t TK3[4];
uint32_t *schedule;
unsigned round;
uint8_t rc;
#endif
#if SKINNY_128_SMALL_SCHEDULE
/* Copy the input key as-is when using the small key schedule version */
memcpy(ks->TK1, key, sizeof(ks->TK1));
memcpy(ks->TK2, key + 16, sizeof(ks->TK2));
memcpy(ks->TK3, key + 32, sizeof(ks->TK3));
#else
/* Set the initial states of TK1, TK2, and TK3 */
memcpy(ks->TK1, key, 16);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
TK3[0] = le_load_word32(key + 32);
TK3[1] = le_load_word32(key + 36);
TK3[2] = le_load_word32(key + 40);
TK3[3] = le_load_word32(key + 44);
/* Set up the key schedule using TK2 and TK3. TK1 is not added
* to the key schedule because we will derive that part of the
* schedule during encryption operations */
schedule = ks->k;
rc = 0;
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) {
/* XOR the round constants with the current schedule words.
* The round constants for the 3rd and 4th rows are
* fixed and will be applied during encryption. */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F);
schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4);
/* Permute TK2 and TK3 for the next round */
skinny128_permute_tk(TK2);
skinny128_permute_tk(TK3);
/* Apply the LFSR's to TK2 and TK3 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
}
#endif
}
void skinny_128_384_encrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t TK3[4];
uint8_t rc = 0;
#else
const uint32_t *schedule = ks->k;
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
TK3[0] = le_load_word32(ks->TK3);
TK3[1] = le_load_word32(ks->TK3 + 4);
TK3[2] = le_load_word32(ks->TK3 + 8);
TK3[3] = le_load_word32(ks->TK3 + 12);
#endif
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
#endif
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 for the next round */
skinny128_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_permute_tk(TK2);
skinny128_permute_tk(TK3);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
#else
schedule += 2;
#endif
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_384_decrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t TK3[4];
uint8_t rc = 0x15;
#else
const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]);
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state, TK1 */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
TK3[0] = le_load_word32(ks->TK3);
TK3[1] = le_load_word32(ks->TK3 + 4);
TK3[2] = le_load_word32(ks->TK3 + 8);
TK3[3] = le_load_word32(ks->TK3 + 12);
#endif
/* Permute TK1 to fast-forward it to the end of the key schedule */
skinny128_fast_forward_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_fast_forward_tk(TK2);
skinny128_fast_forward_tk(TK3);
for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) {
/* Also fast-forward the LFSR's on every byte of TK2 and TK3 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR2(TK2[2]);
skinny128_LFSR2(TK2[3]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
skinny128_LFSR3(TK3[2]);
skinny128_LFSR3(TK3[3]);
}
#endif
/* Perform all decryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Inverse permutation on TK1 for this round */
skinny128_inv_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_inv_permute_tk(TK2);
skinny128_inv_permute_tk(TK3);
skinny128_LFSR3(TK2[2]);
skinny128_LFSR3(TK2[3]);
skinny128_LFSR2(TK3[2]);
skinny128_LFSR2(TK3[3]);
#endif
/* Inverse mix of the columns */
temp = s3;
s3 = s0;
s0 = s1;
s1 = s2;
s3 ^= temp;
s2 = temp ^ s0;
s1 ^= s2;
/* Inverse shift of the rows */
s1 = leftRotate24(s1);
s2 = leftRotate16(s2);
s3 = leftRotate8(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
schedule -= 2;
#endif
s2 ^= 0x02;
/* Apply the inverse of the S-box to all bytes in the state */
skinny128_inv_sbox(s0);
skinny128_inv_sbox(s1);
skinny128_inv_sbox(s2);
skinny128_inv_sbox(s3);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_384_encrypt_tk2
(skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input, const unsigned char *tk2)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
uint32_t TK2[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK3[4];
uint8_t rc = 0;
#else
const uint32_t *schedule = ks->k;
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
TK2[0] = le_load_word32(tk2);
TK2[1] = le_load_word32(tk2 + 4);
TK2[2] = le_load_word32(tk2 + 8);
TK2[3] = le_load_word32(tk2 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK3[0] = le_load_word32(ks->TK3);
TK3[1] = le_load_word32(ks->TK3 + 4);
TK3[2] = le_load_word32(ks->TK3 + 8);
TK3[3] = le_load_word32(ks->TK3 + 12);
#endif
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0] ^ TK2[0];
s1 ^= schedule[1] ^ TK1[1] ^ TK2[1];
#endif
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 and TK2 for the next round */
skinny128_permute_tk(TK1);
skinny128_permute_tk(TK2);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_permute_tk(TK3);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
#else
schedule += 2;
#endif
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_384_encrypt_tk_full
(const unsigned char key[48], unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
uint32_t TK2[4];
uint32_t TK3[4];
uint32_t temp;
unsigned round;
uint8_t rc = 0;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakey */
TK1[0] = le_load_word32(key);
TK1[1] = le_load_word32(key + 4);
TK1[2] = le_load_word32(key + 8);
TK1[3] = le_load_word32(key + 12);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
TK3[0] = le_load_word32(key + 32);
TK3[1] = le_load_word32(key + 36);
TK3[2] = le_load_word32(key + 40);
TK3[3] = le_load_word32(key + 44);
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* XOR the round constant and the subkey for this round */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4);
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1, TK2, and TK3 for the next round */
skinny128_permute_tk(TK1);
skinny128_permute_tk(TK2);
skinny128_permute_tk(TK3);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR3(TK3[0]);
skinny128_LFSR3(TK3[1]);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_256_init
(skinny_128_256_key_schedule_t *ks, const unsigned char key[32])
{
#if !SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint32_t *schedule;
unsigned round;
uint8_t rc;
#endif
#if SKINNY_128_SMALL_SCHEDULE
/* Copy the input key as-is when using the small key schedule version */
memcpy(ks->TK1, key, sizeof(ks->TK1));
memcpy(ks->TK2, key + 16, sizeof(ks->TK2));
#else
/* Set the initial states of TK1 and TK2 */
memcpy(ks->TK1, key, 16);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
/* Set up the key schedule using TK2. TK1 is not added
* to the key schedule because we will derive that part of the
* schedule during encryption operations */
schedule = ks->k;
rc = 0;
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) {
/* XOR the round constants with the current schedule words.
* The round constants for the 3rd and 4th rows are
* fixed and will be applied during encryption. */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
schedule[0] = TK2[0] ^ (rc & 0x0F);
schedule[1] = TK2[1] ^ (rc >> 4);
/* Permute TK2 for the next round */
skinny128_permute_tk(TK2);
/* Apply the LFSR to TK2 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
}
#endif
}
void skinny_128_256_encrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint8_t rc = 0;
#else
const uint32_t *schedule = ks->k;
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state, TK1 */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
#endif
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* XOR the round constant and the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
#endif
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 and TK2 for the next round */
skinny128_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_permute_tk(TK2);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
#else
schedule += 2;
#endif
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_256_decrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
#if SKINNY_128_SMALL_SCHEDULE
uint32_t TK2[4];
uint8_t rc = 0x09;
#else
const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]);
#endif
uint32_t temp;
unsigned round;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakable part of the state, TK1.
* There is no need to fast-forward TK1 because the value at
* the end of the key schedule is the same as at the start */
TK1[0] = le_load_word32(ks->TK1);
TK1[1] = le_load_word32(ks->TK1 + 4);
TK1[2] = le_load_word32(ks->TK1 + 8);
TK1[3] = le_load_word32(ks->TK1 + 12);
#if SKINNY_128_SMALL_SCHEDULE
TK2[0] = le_load_word32(ks->TK2);
TK2[1] = le_load_word32(ks->TK2 + 4);
TK2[2] = le_load_word32(ks->TK2 + 8);
TK2[3] = le_load_word32(ks->TK2 + 12);
for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) {
/* Also fast-forward the LFSR's on every byte of TK2 */
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
skinny128_LFSR2(TK2[2]);
skinny128_LFSR2(TK2[3]);
}
#endif
/* Perform all decryption rounds */
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
/* Inverse permutation on TK1 for this round */
skinny128_inv_permute_tk(TK1);
#if SKINNY_128_SMALL_SCHEDULE
skinny128_inv_permute_tk(TK2);
skinny128_LFSR3(TK2[2]);
skinny128_LFSR3(TK2[3]);
#endif
/* Inverse mix of the columns */
temp = s3;
s3 = s0;
s0 = s1;
s1 = s2;
s3 ^= temp;
s2 = temp ^ s0;
s1 ^= s2;
/* Inverse shift of the rows */
s1 = leftRotate24(s1);
s2 = leftRotate16(s2);
s3 = leftRotate8(s3);
/* Apply the subkey for this round */
#if SKINNY_128_SMALL_SCHEDULE
rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4);
#else
s0 ^= schedule[0] ^ TK1[0];
s1 ^= schedule[1] ^ TK1[1];
schedule -= 2;
#endif
s2 ^= 0x02;
/* Apply the inverse of the S-box to all bytes in the state */
skinny128_inv_sbox(s0);
skinny128_inv_sbox(s1);
skinny128_inv_sbox(s2);
skinny128_inv_sbox(s3);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
void skinny_128_256_encrypt_tk_full
(const unsigned char key[32], unsigned char *output,
const unsigned char *input)
{
uint32_t s0, s1, s2, s3;
uint32_t TK1[4];
uint32_t TK2[4];
uint32_t temp;
unsigned round;
uint8_t rc = 0;
/* Unpack the input block into the state array */
s0 = le_load_word32(input);
s1 = le_load_word32(input + 4);
s2 = le_load_word32(input + 8);
s3 = le_load_word32(input + 12);
/* Make a local copy of the tweakey */
TK1[0] = le_load_word32(key);
TK1[1] = le_load_word32(key + 4);
TK1[2] = le_load_word32(key + 8);
TK1[3] = le_load_word32(key + 12);
TK2[0] = le_load_word32(key + 16);
TK2[1] = le_load_word32(key + 20);
TK2[2] = le_load_word32(key + 24);
TK2[3] = le_load_word32(key + 28);
/* Perform all encryption rounds */
for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
/* Apply the S-box to all bytes in the state */
skinny128_sbox(s0);
skinny128_sbox(s1);
skinny128_sbox(s2);
skinny128_sbox(s3);
/* XOR the round constant and the subkey for this round */
rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
rc &= 0x3F;
s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F);
s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4);
s2 ^= 0x02;
/* Shift the cells in the rows right, which moves the cell
* values up closer to the MSB. That is, we do a left rotate
* on the word to rotate the cells in the word right */
s1 = leftRotate8(s1);
s2 = leftRotate16(s2);
s3 = leftRotate24(s3);
/* Mix the columns */
s1 ^= s2;
s2 ^= s0;
temp = s3 ^ s2;
s3 = s2;
s2 = s1;
s1 = s0;
s0 = temp;
/* Permute TK1 and TK2 for the next round */
skinny128_permute_tk(TK1);
skinny128_permute_tk(TK2);
skinny128_LFSR2(TK2[0]);
skinny128_LFSR2(TK2[1]);
}
/* Pack the result into the output buffer */
le_store_word32(output, s0);
le_store_word32(output + 4, s1);
le_store_word32(output + 8, s2);
le_store_word32(output + 12, s3);
}
#else /* __AVR__ */
void skinny_128_384_encrypt_tk2
(skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input, const unsigned char *tk2)
{
memcpy(ks->TK2, tk2, 16);
skinny_128_384_encrypt(ks, output, input);
}
#endif /* __AVR__ */
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_SKINNY128_H
#define LW_INTERNAL_SKINNY128_H
/**
* \file internal-skinny128.h
* \brief SKINNY-128 block cipher family.
*
* References: https://eprint.iacr.org/2016/660.pdf,
* https://sites.google.com/site/skinnycipher/
*/
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* \def SKINNY_128_SMALL_SCHEDULE
* \brief Defined to 1 to use the small key schedule version of SKINNY-128.
*/
#if defined(__AVR__)
#define SKINNY_128_SMALL_SCHEDULE 1
#else
#define SKINNY_128_SMALL_SCHEDULE 0
#endif
/**
* \brief Size of a block for SKINNY-128 block ciphers.
*/
#define SKINNY_128_BLOCK_SIZE 16
/**
* \brief Number of rounds for SKINNY-128-384.
*/
#define SKINNY_128_384_ROUNDS 56
/**
* \brief Structure of the key schedule for SKINNY-128-384.
*/
typedef struct
{
/** TK1 for the tweakable part of the key schedule */
uint8_t TK1[16];
#if SKINNY_128_SMALL_SCHEDULE
/** TK2 for the small key schedule */
uint8_t TK2[16];
/** TK3 for the small key schedule */
uint8_t TK3[16];
#else
/** Words of the full key schedule */
uint32_t k[SKINNY_128_384_ROUNDS * 2];
#endif
} skinny_128_384_key_schedule_t;
/**
* \brief Initializes the key schedule for SKINNY-128-384.
*
* \param ks Points to the key schedule to initialize.
* \param key Points to the key data.
*/
void skinny_128_384_init
(skinny_128_384_key_schedule_t *ks, const unsigned char key[48]);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-384.
*
* \param ks Points to the SKINNY-128-384 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*/
void skinny_128_384_encrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Decrypts a 128-bit block with SKINNY-128-384.
*
* \param ks Points to the SKINNY-128-384 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*/
void skinny_128_384_decrypt
(const skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly
* provided TK2 value.
*
* \param ks Points to the SKINNY-128-384 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
* \param tk2 TK2 value that should be updated on the fly.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* This version is useful when both TK1 and TK2 change from block to block.
* When the key is initialized with skinny_128_384_init(), the TK2 part of
* the key value should be set to zero.
*
* \note Some versions of this function may modify the key schedule to
* copy tk2 into place.
*/
void skinny_128_384_encrypt_tk2
(skinny_128_384_key_schedule_t *ks, unsigned char *output,
const unsigned char *input, const unsigned char *tk2);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-384 and a
* fully specified tweakey value.
*
* \param key Points to the 384-bit tweakey value.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* This version is useful when the entire tweakey changes from block to
* block. It is slower than the other versions of SKINNY-128-384 but
* more memory-efficient.
*/
void skinny_128_384_encrypt_tk_full
(const unsigned char key[48], unsigned char *output,
const unsigned char *input);
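/*
 * Usage sketch (illustrative only, not part of the original sources):
 * a single-block round trip with the SKINNY-128-384 API declared above,
 * using an all-zero 48-byte tweakey purely for demonstration.
 *
 *     skinny_128_384_key_schedule_t ks;
 *     unsigned char tweakey[48] = {0};
 *     unsigned char block[SKINNY_128_BLOCK_SIZE] = {0};
 *     unsigned char out[SKINNY_128_BLOCK_SIZE];
 *
 *     skinny_128_384_init(&ks, tweakey);
 *     skinny_128_384_encrypt(&ks, out, block);
 *     skinny_128_384_decrypt(&ks, block, out);   // recovers the original block
 */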
/**
* \brief Number of rounds for SKINNY-128-256.
*/
#define SKINNY_128_256_ROUNDS 48
/**
* \brief Structure of the key schedule for SKINNY-128-256.
*/
typedef struct
{
/** TK1 for the tweakable part of the key schedule */
uint8_t TK1[16];
#if SKINNY_128_SMALL_SCHEDULE
/** TK2 for the small key schedule */
uint8_t TK2[16];
#else
/** Words of the full key schedule */
uint32_t k[SKINNY_128_256_ROUNDS * 2];
#endif
} skinny_128_256_key_schedule_t;
/**
* \brief Initializes the key schedule for SKINNY-128-256.
*
* \param ks Points to the key schedule to initialize.
* \param key Points to the key data.
*/
void skinny_128_256_init
(skinny_128_256_key_schedule_t *ks, const unsigned char key[32]);
/**
* \brief Encrypts a 128-bit block with SKINNY-128-256.
*
* \param ks Points to the SKINNY-128-256 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*/
void skinny_128_256_encrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
/**
* \brief Decrypts a 128-bit block with SKINNY-128-256.
*
* \param ks Points to the SKINNY-128-256 key schedule.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place decryption.
*/
void skinny_128_256_decrypt
(const skinny_128_256_key_schedule_t *ks, unsigned char *output,
const unsigned char *input);
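/*
* Example (illustrative sketch, not part of the declarations above): a simple
* encrypt/decrypt round trip for SKINNY-128-256 with a fixed 32-byte tweakey.
*
* skinny_128_256_key_schedule_t ks;
* unsigned char key[32] = {0};
* unsigned char pt[16] = {0}, ct[16], out[16];
* skinny_128_256_init(&ks, key);
* skinny_128_256_encrypt(&ks, ct, pt);
* skinny_128_256_decrypt(&ks, out, ct);   // "out" now equals "pt"
*/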
/**
* \brief Encrypts a 128-bit block with SKINNY-128-256 and a
* fully specified tweakey value.
*
* \param key Points to the 256-bit tweakey value.
* \param output Output buffer which must be at least 16 bytes in length.
* \param input Input buffer which must be at least 16 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* This version is useful when the entire tweakey changes from block to
* block. It is slower than the other versions of SKINNY-128-256 but
* more memory-efficient.
*/
void skinny_128_256_encrypt_tk_full
(const unsigned char key[32], unsigned char *output,
const unsigned char *input);
#ifdef __cplusplus
}
#endif
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_SKINNYUTIL_H
#define LW_INTERNAL_SKINNYUTIL_H
/**
* \file internal-skinnyutil.h
* \brief Utilities to help implement SKINNY and its variants.
*/
#include "internal-util.h"
#ifdef __cplusplus
extern "C" {
#endif
/** @cond skinnyutil */
/* Utilities for implementing SKINNY-128 */
#define skinny128_LFSR2(x) \
do { \
uint32_t _x = (x); \
(x) = ((_x << 1) & 0xFEFEFEFEU) ^ \
(((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \
} while (0)
#define skinny128_LFSR3(x) \
do { \
uint32_t _x = (x); \
(x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \
(((_x << 7) ^ (_x << 1)) & 0x80808080U); \
} while (0)
/* LFSR2 and LFSR3 are inverses of each other */
#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x)
#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x)
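/* For example (illustrative), applying skinny128_LFSR2 to a word holding
* 0x01 in a byte lane yields 0x02 in that lane, and skinny128_inv_LFSR2
* (i.e. skinny128_LFSR3) maps it back to 0x01; each of the four byte
* lanes of the 32-bit word is updated independently. */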
#define skinny128_permute_tk(tk) \
do { \
/* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \
uint32_t row2 = tk[2]; \
uint32_t row3 = tk[3]; \
tk[2] = tk[0]; \
tk[3] = tk[1]; \
row3 = (row3 << 16) | (row3 >> 16); \
tk[0] = ((row2 >> 8) & 0x000000FFU) | \
((row2 << 16) & 0x00FF0000U) | \
( row3 & 0xFF00FF00U); \
tk[1] = ((row2 >> 16) & 0x000000FFU) | \
(row2 & 0xFF000000U) | \
((row3 << 8) & 0x0000FF00U) | \
( row3 & 0x00FF0000U); \
} while (0)
#define skinny128_inv_permute_tk(tk) \
do { \
/* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \
uint32_t row0 = tk[0]; \
uint32_t row1 = tk[1]; \
tk[0] = tk[2]; \
tk[1] = tk[3]; \
tk[2] = ((row0 >> 16) & 0x000000FFU) | \
((row0 << 8) & 0x0000FF00U) | \
((row1 << 16) & 0x00FF0000U) | \
( row1 & 0xFF000000U); \
tk[3] = ((row0 >> 16) & 0x0000FF00U) | \
((row0 << 16) & 0xFF000000U) | \
((row1 >> 16) & 0x000000FFU) | \
((row1 << 8) & 0x00FF0000U); \
} while (0)
/*
* Apply the SKINNY sbox. The original version from the specification is
* equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
* #define SBOX_SWAP(x)
* (((x) & 0xF9F9F9F9U) |
* (((x) >> 1) & 0x02020202U) |
* (((x) << 1) & 0x04040404U))
* #define SBOX_PERMUTE(x)
* ((((x) & 0x01010101U) << 2) |
* (((x) & 0x06060606U) << 5) |
* (((x) & 0x20202020U) >> 5) |
* (((x) & 0xC8C8C8C8U) >> 2) |
* (((x) & 0x10101010U) >> 1))
*
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE(x);
* x = SBOX_MIX(x);
* return SBOX_SWAP(x);
*
* However, we can mix the bits in their original positions and then
* delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one
* final permutation. This reduces the number of shift operations.
*/
#define skinny128_sbox(x) \
do { \
uint32_t y; \
\
/* Mix the bits */ \
x = ~x; \
x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \
y = (((x << 5) & (x << 1)) & 0x20202020U); \
x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \
y = (((x << 2) & (x << 1)) & 0x80808080U); \
x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \
y = (((x >> 5) & (x << 1)) & 0x04040404U); \
x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \
x = ~x; \
\
/* Permutation generated by http://programming.sirrida.de/calcperm.php */ \
/* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \
x = ((x & 0x08080808U) << 1) | \
((x & 0x32323232U) << 2) | \
((x & 0x01010101U) << 5) | \
((x & 0x80808080U) >> 6) | \
((x & 0x40404040U) >> 4) | \
((x & 0x04040404U) >> 2); \
} while (0)
/*
* Apply the inverse of the SKINNY sbox. The original version from the
* specification is equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
* #define SBOX_SWAP(x)
* (((x) & 0xF9F9F9F9U) |
* (((x) >> 1) & 0x02020202U) |
* (((x) << 1) & 0x04040404U))
* #define SBOX_PERMUTE_INV(x)
* ((((x) & 0x08080808U) << 1) |
* (((x) & 0x32323232U) << 2) |
* (((x) & 0x01010101U) << 5) |
* (((x) & 0xC0C0C0C0U) >> 5) |
* (((x) & 0x04040404U) >> 2))
*
* x = SBOX_SWAP(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_PERMUTE_INV(x);
* return SBOX_MIX(x);
*
* However, we can mix the bits in their original positions and then
* delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one
* final permutation. This reduces the number of shift operations.
*/
#define skinny128_inv_sbox(x) \
do { \
uint32_t y; \
\
/* Mix the bits */ \
x = ~x; \
y = (((x >> 1) & (x >> 3)) & 0x01010101U); \
x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \
y = (((x >> 6) & (x >> 1)) & 0x02020202U); \
x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \
y = (((x << 2) & (x << 1)) & 0x80808080U); \
x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \
y = (((x << 5) & (x << 1)) & 0x20202020U); \
x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \
x = ~x; \
\
/* Permutation generated by http://programming.sirrida.de/calcperm.php */ \
/* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \
x = ((x & 0x01010101U) << 2) | \
((x & 0x04040404U) << 4) | \
((x & 0x02020202U) << 6) | \
((x & 0x20202020U) >> 5) | \
((x & 0xC8C8C8C8U) >> 2) | \
((x & 0x10101010U) >> 1); \
} while (0)
/* Utilities for implementing SKINNY-64 */
#define skinny64_LFSR2(x) \
do { \
uint16_t _x = (x); \
(x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \
} while (0)
#define skinny64_LFSR3(x) \
do { \
uint16_t _x = (x); \
(x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \
} while (0)
/* LFSR2 and LFSR3 are inverses of each other */
#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x)
#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x)
#define skinny64_permute_tk(tk) \
do { \
/* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \
uint16_t row2 = tk[2]; \
uint16_t row3 = tk[3]; \
tk[2] = tk[0]; \
tk[3] = tk[1]; \
row3 = (row3 << 8) | (row3 >> 8); \
tk[0] = ((row2 << 4) & 0xF000U) | \
((row2 >> 8) & 0x00F0U) | \
( row3 & 0x0F0FU); \
tk[1] = ((row2 << 8) & 0xF000U) | \
((row3 >> 4) & 0x0F00U) | \
( row3 & 0x00F0U) | \
( row2 & 0x000FU); \
} while (0)
#define skinny64_inv_permute_tk(tk) \
do { \
/* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \
uint16_t row0 = tk[0]; \
uint16_t row1 = tk[1]; \
tk[0] = tk[2]; \
tk[1] = tk[3]; \
tk[2] = ((row0 << 8) & 0xF000U) | \
((row0 >> 4) & 0x0F00U) | \
((row1 >> 8) & 0x00F0U) | \
( row1 & 0x000FU); \
tk[3] = ((row1 << 8) & 0xF000U) | \
((row0 << 8) & 0x0F00U) | \
((row1 >> 4) & 0x00F0U) | \
((row0 >> 8) & 0x000FU); \
} while (0)
/*
* Apply the SKINNY-64 sbox. The original version from the
* specification is equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x))
* #define SBOX_SHIFT(x)
* ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U))
*
* x = SBOX_MIX(x);
* x = SBOX_SHIFT(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT(x);
* return SBOX_MIX(x);
*
* However, we can mix the bits in their original positions and then
* delay the SBOX_SHIFT steps to be performed with one final rotation.
* This reduces the number of required shift operations from 14 to 10.
*
* We can further reduce the number of NOT operations from 4 to 2
* using the technique from https://github.com/kste/skinny_avx to
* convert NOR-XOR operations into AND-XOR operations by converting
* the S-box into its NOT-inverse.
*/
#define skinny64_sbox(x) \
do { \
x = ~x; \
x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \
x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \
x = ~x; \
x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \
} while (0)
/*
* Apply the inverse of the SKINNY-64 sbox. The original version
* from the specification is equivalent to:
*
* #define SBOX_MIX(x)
* (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x))
* #define SBOX_SHIFT_INV(x)
* ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U))
*
* x = SBOX_MIX(x);
* x = SBOX_SHIFT_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT_INV(x);
* x = SBOX_MIX(x);
* x = SBOX_SHIFT_INV(x);
* return SBOX_MIX(x);
*/
#define skinny64_inv_sbox(x) \
do { \
x = ~x; \
x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \
x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \
x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \
x = ~x; \
x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \
} while (0)
/** @endcond */
#ifdef __cplusplus
}
#endif
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_UTIL_H
#define LW_INTERNAL_UTIL_H
#include <stdint.h>
/* Figure out how to inline functions using this C compiler */
#if defined(__STDC__) && __STDC_VERSION__ >= 199901L
#define STATIC_INLINE static inline
#elif defined(__GNUC__) || defined(__clang__)
#define STATIC_INLINE static __inline__
#else
#define STATIC_INLINE static
#endif
/* Try to figure out whether the CPU is little-endian or big-endian.
* May need to modify this to include new compiler-specific defines.
* Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your
* compiler flags when you compile this library */
#if defined(__x86_64) || defined(__x86_64__) || \
defined(__i386) || defined(__i386__) || \
defined(__AVR__) || defined(__arm) || defined(__arm__) || \
defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \
defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \
(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \
defined(__LITTLE_ENDIAN__)
#define LW_UTIL_LITTLE_ENDIAN 1
#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \
defined(__BIG_ENDIAN__)
/* Big endian */
#else
#error "Cannot determine the endianness of this platform"
#endif
/* Helper macros to load and store values while converting endian-ness */
/* Load a big-endian 32-bit word from a byte buffer */
#define be_load_word32(ptr) \
((((uint32_t)((ptr)[0])) << 24) | \
(((uint32_t)((ptr)[1])) << 16) | \
(((uint32_t)((ptr)[2])) << 8) | \
((uint32_t)((ptr)[3])))
/* Store a big-endian 32-bit word into a byte buffer */
#define be_store_word32(ptr, x) \
do { \
uint32_t _x = (x); \
(ptr)[0] = (uint8_t)(_x >> 24); \
(ptr)[1] = (uint8_t)(_x >> 16); \
(ptr)[2] = (uint8_t)(_x >> 8); \
(ptr)[3] = (uint8_t)_x; \
} while (0)
/* Load a little-endian 32-bit word from a byte buffer */
#define le_load_word32(ptr) \
((((uint32_t)((ptr)[3])) << 24) | \
(((uint32_t)((ptr)[2])) << 16) | \
(((uint32_t)((ptr)[1])) << 8) | \
((uint32_t)((ptr)[0])))
/* Store a little-endian 32-bit word into a byte buffer */
#define le_store_word32(ptr, x) \
do { \
uint32_t _x = (x); \
(ptr)[0] = (uint8_t)_x; \
(ptr)[1] = (uint8_t)(_x >> 8); \
(ptr)[2] = (uint8_t)(_x >> 16); \
(ptr)[3] = (uint8_t)(_x >> 24); \
} while (0)
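/* For example (illustrative): le_load_word32 applied to the bytes
* {0x78, 0x56, 0x34, 0x12} yields 0x12345678, and le_store_word32
* writes that word back out as the same four bytes, so a load/store
* pair round-trips the buffer unchanged. */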
/* Load a big-endian 64-bit word from a byte buffer */
#define be_load_word64(ptr) \
((((uint64_t)((ptr)[0])) << 56) | \
(((uint64_t)((ptr)[1])) << 48) | \
(((uint64_t)((ptr)[2])) << 40) | \
(((uint64_t)((ptr)[3])) << 32) | \
(((uint64_t)((ptr)[4])) << 24) | \
(((uint64_t)((ptr)[5])) << 16) | \
(((uint64_t)((ptr)[6])) << 8) | \
((uint64_t)((ptr)[7])))
/* Store a big-endian 64-bit word into a byte buffer */
#define be_store_word64(ptr, x) \
do { \
uint64_t _x = (x); \
(ptr)[0] = (uint8_t)(_x >> 56); \
(ptr)[1] = (uint8_t)(_x >> 48); \
(ptr)[2] = (uint8_t)(_x >> 40); \
(ptr)[3] = (uint8_t)(_x >> 32); \
(ptr)[4] = (uint8_t)(_x >> 24); \
(ptr)[5] = (uint8_t)(_x >> 16); \
(ptr)[6] = (uint8_t)(_x >> 8); \
(ptr)[7] = (uint8_t)_x; \
} while (0)
/* Load a little-endian 64-bit word from a byte buffer */
#define le_load_word64(ptr) \
((((uint64_t)((ptr)[7])) << 56) | \
(((uint64_t)((ptr)[6])) << 48) | \
(((uint64_t)((ptr)[5])) << 40) | \
(((uint64_t)((ptr)[4])) << 32) | \
(((uint64_t)((ptr)[3])) << 24) | \
(((uint64_t)((ptr)[2])) << 16) | \
(((uint64_t)((ptr)[1])) << 8) | \
((uint64_t)((ptr)[0])))
/* Store a little-endian 64-bit word into a byte buffer */
#define le_store_word64(ptr, x) \
do { \
uint64_t _x = (x); \
(ptr)[0] = (uint8_t)_x; \
(ptr)[1] = (uint8_t)(_x >> 8); \
(ptr)[2] = (uint8_t)(_x >> 16); \
(ptr)[3] = (uint8_t)(_x >> 24); \
(ptr)[4] = (uint8_t)(_x >> 32); \
(ptr)[5] = (uint8_t)(_x >> 40); \
(ptr)[6] = (uint8_t)(_x >> 48); \
(ptr)[7] = (uint8_t)(_x >> 56); \
} while (0)
/* Load a big-endian 16-bit word from a byte buffer */
#define be_load_word16(ptr) \
((((uint16_t)((ptr)[0])) << 8) | \
((uint16_t)((ptr)[1])))
/* Store a big-endian 16-bit word into a byte buffer */
#define be_store_word16(ptr, x) \
do { \
uint16_t _x = (x); \
(ptr)[0] = (uint8_t)(_x >> 8); \
(ptr)[1] = (uint8_t)_x; \
} while (0)
/* Load a little-endian 16-bit word from a byte buffer */
#define le_load_word16(ptr) \
((((uint16_t)((ptr)[1])) << 8) | \
((uint16_t)((ptr)[0])))
/* Store a little-endian 16-bit word into a byte buffer */
#define le_store_word16(ptr, x) \
do { \
uint16_t _x = (x); \
(ptr)[0] = (uint8_t)_x; \
(ptr)[1] = (uint8_t)(_x >> 8); \
} while (0)
/* XOR a source byte buffer against a destination */
#define lw_xor_block(dest, src, len) \
do { \
unsigned char *_dest = (dest); \
const unsigned char *_src = (src); \
unsigned _len = (len); \
while (_len > 0) { \
*_dest++ ^= *_src++; \
--_len; \
} \
} while (0)
/* XOR two source byte buffers and put the result in a destination buffer */
#define lw_xor_block_2_src(dest, src1, src2, len) \
do { \
unsigned char *_dest = (dest); \
const unsigned char *_src1 = (src1); \
const unsigned char *_src2 = (src2); \
unsigned _len = (len); \
while (_len > 0) { \
*_dest++ = *_src1++ ^ *_src2++; \
--_len; \
} \
} while (0)
/* XOR a source byte buffer against a destination and write to another
* destination at the same time */
#define lw_xor_block_2_dest(dest2, dest, src, len) \
do { \
unsigned char *_dest2 = (dest2); \
unsigned char *_dest = (dest); \
const unsigned char *_src = (src); \
unsigned _len = (len); \
while (_len > 0) { \
*_dest2++ = (*_dest++ ^= *_src++); \
--_len; \
} \
} while (0)
/* XOR two source byte buffers and write the result to a destination while
* at the same time copying the contents of src2 to dest2 */
#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \
do { \
unsigned char *_dest2 = (dest2); \
unsigned char *_dest = (dest); \
const unsigned char *_src1 = (src1); \
const unsigned char *_src2 = (src2); \
unsigned _len = (len); \
while (_len > 0) { \
unsigned char _temp = *_src2++; \
*_dest2++ = _temp; \
*_dest++ = *_src1++ ^ _temp; \
--_len; \
} \
} while (0)
/* XOR a source byte buffer against a destination and write to another
* destination at the same time. This version swaps the source value
* into the "dest" buffer */
#define lw_xor_block_swap(dest2, dest, src, len) \
do { \
unsigned char *_dest2 = (dest2); \
unsigned char *_dest = (dest); \
const unsigned char *_src = (src); \
unsigned _len = (len); \
while (_len > 0) { \
unsigned char _temp = *_src++; \
*_dest2++ = *_dest ^ _temp; \
*_dest++ = _temp; \
--_len; \
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
#define leftRotate(a, bits) \
(__extension__ ({ \
uint32_t _temp = (a); \
(_temp << (bits)) | (_temp >> (32 - (bits))); \
}))
/* Generic right rotate */
#define rightRotate(a, bits) \
(__extension__ ({ \
uint32_t _temp = (a); \
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
#define leftRotate2(a) (leftRotate((a), 2))
#define leftRotate3(a) (leftRotate((a), 3))
#define leftRotate4(a) (leftRotate((a), 4))
#define leftRotate5(a) (leftRotate((a), 5))
#define leftRotate6(a) (leftRotate((a), 6))
#define leftRotate7(a) (leftRotate((a), 7))
#define leftRotate8(a) (leftRotate((a), 8))
#define leftRotate9(a) (leftRotate((a), 9))
#define leftRotate10(a) (leftRotate((a), 10))
#define leftRotate11(a) (leftRotate((a), 11))
#define leftRotate12(a) (leftRotate((a), 12))
#define leftRotate13(a) (leftRotate((a), 13))
#define leftRotate14(a) (leftRotate((a), 14))
#define leftRotate15(a) (leftRotate((a), 15))
#define leftRotate16(a) (leftRotate((a), 16))
#define leftRotate17(a) (leftRotate((a), 17))
#define leftRotate18(a) (leftRotate((a), 18))
#define leftRotate19(a) (leftRotate((a), 19))
#define leftRotate20(a) (leftRotate((a), 20))
#define leftRotate21(a) (leftRotate((a), 21))
#define leftRotate22(a) (leftRotate((a), 22))
#define leftRotate23(a) (leftRotate((a), 23))
#define leftRotate24(a) (leftRotate((a), 24))
#define leftRotate25(a) (leftRotate((a), 25))
#define leftRotate26(a) (leftRotate((a), 26))
#define leftRotate27(a) (leftRotate((a), 27))
#define leftRotate28(a) (leftRotate((a), 28))
#define leftRotate29(a) (leftRotate((a), 29))
#define leftRotate30(a) (leftRotate((a), 30))
#define leftRotate31(a) (leftRotate((a), 31))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1(a) (rightRotate((a), 1))
#define rightRotate2(a) (rightRotate((a), 2))
#define rightRotate3(a) (rightRotate((a), 3))
#define rightRotate4(a) (rightRotate((a), 4))
#define rightRotate5(a) (rightRotate((a), 5))
#define rightRotate6(a) (rightRotate((a), 6))
#define rightRotate7(a) (rightRotate((a), 7))
#define rightRotate8(a) (rightRotate((a), 8))
#define rightRotate9(a) (rightRotate((a), 9))
#define rightRotate10(a) (rightRotate((a), 10))
#define rightRotate11(a) (rightRotate((a), 11))
#define rightRotate12(a) (rightRotate((a), 12))
#define rightRotate13(a) (rightRotate((a), 13))
#define rightRotate14(a) (rightRotate((a), 14))
#define rightRotate15(a) (rightRotate((a), 15))
#define rightRotate16(a) (rightRotate((a), 16))
#define rightRotate17(a) (rightRotate((a), 17))
#define rightRotate18(a) (rightRotate((a), 18))
#define rightRotate19(a) (rightRotate((a), 19))
#define rightRotate20(a) (rightRotate((a), 20))
#define rightRotate21(a) (rightRotate((a), 21))
#define rightRotate22(a) (rightRotate((a), 22))
#define rightRotate23(a) (rightRotate((a), 23))
#define rightRotate24(a) (rightRotate((a), 24))
#define rightRotate25(a) (rightRotate((a), 25))
#define rightRotate26(a) (rightRotate((a), 26))
#define rightRotate27(a) (rightRotate((a), 27))
#define rightRotate28(a) (rightRotate((a), 28))
#define rightRotate29(a) (rightRotate((a), 29))
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
#define leftRotate_64(a, bits) \
(__extension__ ({ \
uint64_t _temp = (a); \
(_temp << (bits)) | (_temp >> (64 - (bits))); \
}))
/* Generic right rotate */
#define rightRotate_64(a, bits) \
(__extension__ ({ \
uint64_t _temp = (a); \
(_temp >> (bits)) | (_temp << (64 - (bits))); \
}))
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1_64(a) (leftRotate_64((a), 1))
#define leftRotate2_64(a) (leftRotate_64((a), 2))
#define leftRotate3_64(a) (leftRotate_64((a), 3))
#define leftRotate4_64(a) (leftRotate_64((a), 4))
#define leftRotate5_64(a) (leftRotate_64((a), 5))
#define leftRotate6_64(a) (leftRotate_64((a), 6))
#define leftRotate7_64(a) (leftRotate_64((a), 7))
#define leftRotate8_64(a) (leftRotate_64((a), 8))
#define leftRotate9_64(a) (leftRotate_64((a), 9))
#define leftRotate10_64(a) (leftRotate_64((a), 10))
#define leftRotate11_64(a) (leftRotate_64((a), 11))
#define leftRotate12_64(a) (leftRotate_64((a), 12))
#define leftRotate13_64(a) (leftRotate_64((a), 13))
#define leftRotate14_64(a) (leftRotate_64((a), 14))
#define leftRotate15_64(a) (leftRotate_64((a), 15))
#define leftRotate16_64(a) (leftRotate_64((a), 16))
#define leftRotate17_64(a) (leftRotate_64((a), 17))
#define leftRotate18_64(a) (leftRotate_64((a), 18))
#define leftRotate19_64(a) (leftRotate_64((a), 19))
#define leftRotate20_64(a) (leftRotate_64((a), 20))
#define leftRotate21_64(a) (leftRotate_64((a), 21))
#define leftRotate22_64(a) (leftRotate_64((a), 22))
#define leftRotate23_64(a) (leftRotate_64((a), 23))
#define leftRotate24_64(a) (leftRotate_64((a), 24))
#define leftRotate25_64(a) (leftRotate_64((a), 25))
#define leftRotate26_64(a) (leftRotate_64((a), 26))
#define leftRotate27_64(a) (leftRotate_64((a), 27))
#define leftRotate28_64(a) (leftRotate_64((a), 28))
#define leftRotate29_64(a) (leftRotate_64((a), 29))
#define leftRotate30_64(a) (leftRotate_64((a), 30))
#define leftRotate31_64(a) (leftRotate_64((a), 31))
#define leftRotate32_64(a) (leftRotate_64((a), 32))
#define leftRotate33_64(a) (leftRotate_64((a), 33))
#define leftRotate34_64(a) (leftRotate_64((a), 34))
#define leftRotate35_64(a) (leftRotate_64((a), 35))
#define leftRotate36_64(a) (leftRotate_64((a), 36))
#define leftRotate37_64(a) (leftRotate_64((a), 37))
#define leftRotate38_64(a) (leftRotate_64((a), 38))
#define leftRotate39_64(a) (leftRotate_64((a), 39))
#define leftRotate40_64(a) (leftRotate_64((a), 40))
#define leftRotate41_64(a) (leftRotate_64((a), 41))
#define leftRotate42_64(a) (leftRotate_64((a), 42))
#define leftRotate43_64(a) (leftRotate_64((a), 43))
#define leftRotate44_64(a) (leftRotate_64((a), 44))
#define leftRotate45_64(a) (leftRotate_64((a), 45))
#define leftRotate46_64(a) (leftRotate_64((a), 46))
#define leftRotate47_64(a) (leftRotate_64((a), 47))
#define leftRotate48_64(a) (leftRotate_64((a), 48))
#define leftRotate49_64(a) (leftRotate_64((a), 49))
#define leftRotate50_64(a) (leftRotate_64((a), 50))
#define leftRotate51_64(a) (leftRotate_64((a), 51))
#define leftRotate52_64(a) (leftRotate_64((a), 52))
#define leftRotate53_64(a) (leftRotate_64((a), 53))
#define leftRotate54_64(a) (leftRotate_64((a), 54))
#define leftRotate55_64(a) (leftRotate_64((a), 55))
#define leftRotate56_64(a) (leftRotate_64((a), 56))
#define leftRotate57_64(a) (leftRotate_64((a), 57))
#define leftRotate58_64(a) (leftRotate_64((a), 58))
#define leftRotate59_64(a) (leftRotate_64((a), 59))
#define leftRotate60_64(a) (leftRotate_64((a), 60))
#define leftRotate61_64(a) (leftRotate_64((a), 61))
#define leftRotate62_64(a) (leftRotate_64((a), 62))
#define leftRotate63_64(a) (leftRotate_64((a), 63))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1_64(a) (rightRotate_64((a), 1))
#define rightRotate2_64(a) (rightRotate_64((a), 2))
#define rightRotate3_64(a) (rightRotate_64((a), 3))
#define rightRotate4_64(a) (rightRotate_64((a), 4))
#define rightRotate5_64(a) (rightRotate_64((a), 5))
#define rightRotate6_64(a) (rightRotate_64((a), 6))
#define rightRotate7_64(a) (rightRotate_64((a), 7))
#define rightRotate8_64(a) (rightRotate_64((a), 8))
#define rightRotate9_64(a) (rightRotate_64((a), 9))
#define rightRotate10_64(a) (rightRotate_64((a), 10))
#define rightRotate11_64(a) (rightRotate_64((a), 11))
#define rightRotate12_64(a) (rightRotate_64((a), 12))
#define rightRotate13_64(a) (rightRotate_64((a), 13))
#define rightRotate14_64(a) (rightRotate_64((a), 14))
#define rightRotate15_64(a) (rightRotate_64((a), 15))
#define rightRotate16_64(a) (rightRotate_64((a), 16))
#define rightRotate17_64(a) (rightRotate_64((a), 17))
#define rightRotate18_64(a) (rightRotate_64((a), 18))
#define rightRotate19_64(a) (rightRotate_64((a), 19))
#define rightRotate20_64(a) (rightRotate_64((a), 20))
#define rightRotate21_64(a) (rightRotate_64((a), 21))
#define rightRotate22_64(a) (rightRotate_64((a), 22))
#define rightRotate23_64(a) (rightRotate_64((a), 23))
#define rightRotate24_64(a) (rightRotate_64((a), 24))
#define rightRotate25_64(a) (rightRotate_64((a), 25))
#define rightRotate26_64(a) (rightRotate_64((a), 26))
#define rightRotate27_64(a) (rightRotate_64((a), 27))
#define rightRotate28_64(a) (rightRotate_64((a), 28))
#define rightRotate29_64(a) (rightRotate_64((a), 29))
#define rightRotate30_64(a) (rightRotate_64((a), 30))
#define rightRotate31_64(a) (rightRotate_64((a), 31))
#define rightRotate32_64(a) (rightRotate_64((a), 32))
#define rightRotate33_64(a) (rightRotate_64((a), 33))
#define rightRotate34_64(a) (rightRotate_64((a), 34))
#define rightRotate35_64(a) (rightRotate_64((a), 35))
#define rightRotate36_64(a) (rightRotate_64((a), 36))
#define rightRotate37_64(a) (rightRotate_64((a), 37))
#define rightRotate38_64(a) (rightRotate_64((a), 38))
#define rightRotate39_64(a) (rightRotate_64((a), 39))
#define rightRotate40_64(a) (rightRotate_64((a), 40))
#define rightRotate41_64(a) (rightRotate_64((a), 41))
#define rightRotate42_64(a) (rightRotate_64((a), 42))
#define rightRotate43_64(a) (rightRotate_64((a), 43))
#define rightRotate44_64(a) (rightRotate_64((a), 44))
#define rightRotate45_64(a) (rightRotate_64((a), 45))
#define rightRotate46_64(a) (rightRotate_64((a), 46))
#define rightRotate47_64(a) (rightRotate_64((a), 47))
#define rightRotate48_64(a) (rightRotate_64((a), 48))
#define rightRotate49_64(a) (rightRotate_64((a), 49))
#define rightRotate50_64(a) (rightRotate_64((a), 50))
#define rightRotate51_64(a) (rightRotate_64((a), 51))
#define rightRotate52_64(a) (rightRotate_64((a), 52))
#define rightRotate53_64(a) (rightRotate_64((a), 53))
#define rightRotate54_64(a) (rightRotate_64((a), 54))
#define rightRotate55_64(a) (rightRotate_64((a), 55))
#define rightRotate56_64(a) (rightRotate_64((a), 56))
#define rightRotate57_64(a) (rightRotate_64((a), 57))
#define rightRotate58_64(a) (rightRotate_64((a), 58))
#define rightRotate59_64(a) (rightRotate_64((a), 59))
#define rightRotate60_64(a) (rightRotate_64((a), 60))
#define rightRotate61_64(a) (rightRotate_64((a), 61))
#define rightRotate62_64(a) (rightRotate_64((a), 62))
#define rightRotate63_64(a) (rightRotate_64((a), 63))
/* Rotate a 16-bit value left by a number of bits */
#define leftRotate_16(a, bits) \
(__extension__ ({ \
uint16_t _temp = (a); \
(_temp << (bits)) | (_temp >> (16 - (bits))); \
}))
/* Rotate a 16-bit value right by a number of bits */
#define rightRotate_16(a, bits) \
(__extension__ ({ \
uint16_t _temp = (a); \
(_temp >> (bits)) | (_temp << (16 - (bits))); \
}))
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1_16(a) (leftRotate_16((a), 1))
#define leftRotate2_16(a) (leftRotate_16((a), 2))
#define leftRotate3_16(a) (leftRotate_16((a), 3))
#define leftRotate4_16(a) (leftRotate_16((a), 4))
#define leftRotate5_16(a) (leftRotate_16((a), 5))
#define leftRotate6_16(a) (leftRotate_16((a), 6))
#define leftRotate7_16(a) (leftRotate_16((a), 7))
#define leftRotate8_16(a) (leftRotate_16((a), 8))
#define leftRotate9_16(a) (leftRotate_16((a), 9))
#define leftRotate10_16(a) (leftRotate_16((a), 10))
#define leftRotate11_16(a) (leftRotate_16((a), 11))
#define leftRotate12_16(a) (leftRotate_16((a), 12))
#define leftRotate13_16(a) (leftRotate_16((a), 13))
#define leftRotate14_16(a) (leftRotate_16((a), 14))
#define leftRotate15_16(a) (leftRotate_16((a), 15))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1_16(a) (rightRotate_16((a), 1))
#define rightRotate2_16(a) (rightRotate_16((a), 2))
#define rightRotate3_16(a) (rightRotate_16((a), 3))
#define rightRotate4_16(a) (rightRotate_16((a), 4))
#define rightRotate5_16(a) (rightRotate_16((a), 5))
#define rightRotate6_16(a) (rightRotate_16((a), 6))
#define rightRotate7_16(a) (rightRotate_16((a), 7))
#define rightRotate8_16(a) (rightRotate_16((a), 8))
#define rightRotate9_16(a) (rightRotate_16((a), 9))
#define rightRotate10_16(a) (rightRotate_16((a), 10))
#define rightRotate11_16(a) (rightRotate_16((a), 11))
#define rightRotate12_16(a) (rightRotate_16((a), 12))
#define rightRotate13_16(a) (rightRotate_16((a), 13))
#define rightRotate14_16(a) (rightRotate_16((a), 14))
#define rightRotate15_16(a) (rightRotate_16((a), 15))
/* Rotate an 8-bit value left by a number of bits */
#define leftRotate_8(a, bits) \
(__extension__ ({ \
uint8_t _temp = (a); \
(_temp << (bits)) | (_temp >> (8 - (bits))); \
}))
/* Rotate an 8-bit value right by a number of bits */
#define rightRotate_8(a, bits) \
(__extension__ ({ \
uint8_t _temp = (a); \
(_temp >> (bits)) | (_temp << (8 - (bits))); \
}))
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1_8(a) (leftRotate_8((a), 1))
#define leftRotate2_8(a) (leftRotate_8((a), 2))
#define leftRotate3_8(a) (leftRotate_8((a), 3))
#define leftRotate4_8(a) (leftRotate_8((a), 4))
#define leftRotate5_8(a) (leftRotate_8((a), 5))
#define leftRotate6_8(a) (leftRotate_8((a), 6))
#define leftRotate7_8(a) (leftRotate_8((a), 7))
/* Right rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define rightRotate1_8(a) (rightRotate_8((a), 1))
#define rightRotate2_8(a) (rightRotate_8((a), 2))
#define rightRotate3_8(a) (rightRotate_8((a), 3))
#define rightRotate4_8(a) (rightRotate_8((a), 4))
#define rightRotate5_8(a) (rightRotate_8((a), 5))
#define rightRotate6_8(a) (rightRotate_8((a), 6))
#define rightRotate7_8(a) (rightRotate_8((a), 7))
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "skinny-aead.h"
#include "internal-skinny128.h"
#include "internal-util.h"
#include <string.h>
aead_cipher_t const skinny_aead_m1_cipher = {
"SKINNY-AEAD-M1",
SKINNY_AEAD_KEY_SIZE,
SKINNY_AEAD_M1_NONCE_SIZE,
SKINNY_AEAD_M1_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
skinny_aead_m1_encrypt,
skinny_aead_m1_decrypt
};
aead_cipher_t const skinny_aead_m2_cipher = {
"SKINNY-AEAD-M2",
SKINNY_AEAD_KEY_SIZE,
SKINNY_AEAD_M2_NONCE_SIZE,
SKINNY_AEAD_M2_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
skinny_aead_m2_encrypt,
skinny_aead_m2_decrypt
};
aead_cipher_t const skinny_aead_m3_cipher = {
"SKINNY-AEAD-M3",
SKINNY_AEAD_KEY_SIZE,
SKINNY_AEAD_M3_NONCE_SIZE,
SKINNY_AEAD_M3_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
skinny_aead_m3_encrypt,
skinny_aead_m3_decrypt
};
aead_cipher_t const skinny_aead_m4_cipher = {
"SKINNY-AEAD-M4",
SKINNY_AEAD_KEY_SIZE,
SKINNY_AEAD_M4_NONCE_SIZE,
SKINNY_AEAD_M4_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
skinny_aead_m4_encrypt,
skinny_aead_m4_decrypt
};
aead_cipher_t const skinny_aead_m5_cipher = {
"SKINNY-AEAD-M5",
SKINNY_AEAD_KEY_SIZE,
SKINNY_AEAD_M5_NONCE_SIZE,
SKINNY_AEAD_M5_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
skinny_aead_m5_encrypt,
skinny_aead_m5_decrypt
};
aead_cipher_t const skinny_aead_m6_cipher = {
"SKINNY-AEAD-M6",
SKINNY_AEAD_KEY_SIZE,
SKINNY_AEAD_M6_NONCE_SIZE,
SKINNY_AEAD_M6_TAG_SIZE,
AEAD_FLAG_LITTLE_ENDIAN,
skinny_aead_m6_encrypt,
skinny_aead_m6_decrypt
};
/* Domain separator prefixes for all of the SKINNY-AEAD family members */
#define DOMAIN_SEP_M1 0x00
#define DOMAIN_SEP_M2 0x10
#define DOMAIN_SEP_M3 0x08
#define DOMAIN_SEP_M4 0x18
#define DOMAIN_SEP_M5 0x10
#define DOMAIN_SEP_M6 0x18
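/* The low bits of the domain separation byte are filled in by the helper
* functions below: the prefix is OR'ed with 0/1 for full/partial message
* blocks, 2/3 for full/partial associated data blocks, and 4/5 when
* generating the tag without/with a partial final message block. */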
/**
* \brief Initializes the key and nonce for SKINNY-128-384 based AEAD schemes.
*
* \param ks The key schedule to initialize.
* \param key Points to the 16 bytes of the key.
* \param nonce Points to the nonce.
* \param nonce_len Length of the nonce in bytes.
*/
static void skinny_aead_128_384_init
(skinny_128_384_key_schedule_t *ks, const unsigned char *key,
const unsigned char *nonce, unsigned nonce_len)
{
unsigned char k[48];
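/* Tweakey layout as built here: bytes 0..15 (TK1) start as zero and later
* receive the per-block LFSR and domain byte; bytes 16..31 (TK2) hold the
* zero-padded nonce; bytes 32..47 (TK3) hold the key. */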
memset(k, 0, 16);
memcpy(k + 16, nonce, nonce_len);
memset(k + 16 + nonce_len, 0, 16 - nonce_len);
memcpy(k + 32, key, 16);
skinny_128_384_init(ks, k);
}
/**
* \brief Set the domain separation value in the tweak for SKINNY-128-384.
*
* \param ks Key schedule for the block cipher.
* \param d Domain separation value to write into the tweak.
*/
#define skinny_aead_128_384_set_domain(ks,d) ((ks)->TK1[15] = (d))
/**
* \brief Sets the LFSR field in the tweak for SKINNY-128-384.
*
* \param ks Key schedule for the block cipher.
* \param lfsr 64-bit LFSR value.
*/
#define skinny_aead_128_384_set_lfsr(ks,lfsr) le_store_word64((ks)->TK1, (lfsr))
/**
* \brief Updates the LFSR value for SKINNY-128-384.
*
* \param lfsr 64-bit LFSR value to be updated.
*/
#define skinny_aead_128_384_update_lfsr(lfsr) \
do { \
uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 0x1B : 0x00; \
(lfsr) = ((lfsr) << 1) ^ feedback; \
} while (0)
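/* As used below, the LFSR doubles as the per-block counter: it starts at 1,
* is written into TK1 before each block is processed, and is clocked once
* per block. */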
/**
* \brief Authenticates the associated data for a SKINNY-128-384 based AEAD.
*
* \param ks The key schedule to use.
* \param prefix Domain separation prefix for the family member.
* \param tag Final tag to XOR the authentication checksum into.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
*/
static void skinny_aead_128_384_authenticate
(skinny_128_384_key_schedule_t *ks, unsigned char prefix,
unsigned char tag[SKINNY_128_BLOCK_SIZE],
const unsigned char *ad, unsigned long long adlen)
{
unsigned char block[SKINNY_128_BLOCK_SIZE];
uint64_t lfsr = 1;
skinny_aead_128_384_set_domain(ks, prefix | 2);
while (adlen >= SKINNY_128_BLOCK_SIZE) {
skinny_aead_128_384_set_lfsr(ks, lfsr);
skinny_128_384_encrypt(ks, block, ad);
lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE);
ad += SKINNY_128_BLOCK_SIZE;
adlen -= SKINNY_128_BLOCK_SIZE;
skinny_aead_128_384_update_lfsr(lfsr);
}
if (adlen > 0) {
unsigned temp = (unsigned)adlen;
skinny_aead_128_384_set_lfsr(ks, lfsr);
skinny_aead_128_384_set_domain(ks, prefix | 3);
memcpy(block, ad, temp);
block[temp] = 0x80;
memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1);
skinny_128_384_encrypt(ks, block, block);
lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE);
}
}
/**
* \brief Encrypts the plaintext for a SKINNY-128-384 based AEAD.
*
* \param ks The key schedule to use.
* \param prefix Domain separation prefix for the family member.
* \param sum Authenticated checksum over the plaintext.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the plaintext buffer.
* \param mlen Number of bytes of plaintext to be encrypted.
*/
static void skinny_aead_128_384_encrypt
(skinny_128_384_key_schedule_t *ks, unsigned char prefix,
unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c,
const unsigned char *m, unsigned long long mlen)
{
unsigned char block[SKINNY_128_BLOCK_SIZE];
uint64_t lfsr = 1;
memset(sum, 0, SKINNY_128_BLOCK_SIZE);
skinny_aead_128_384_set_domain(ks, prefix | 0);
while (mlen >= SKINNY_128_BLOCK_SIZE) {
skinny_aead_128_384_set_lfsr(ks, lfsr);
lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE);
skinny_128_384_encrypt(ks, c, m);
c += SKINNY_128_BLOCK_SIZE;
m += SKINNY_128_BLOCK_SIZE;
mlen -= SKINNY_128_BLOCK_SIZE;
skinny_aead_128_384_update_lfsr(lfsr);
}
skinny_aead_128_384_set_lfsr(ks, lfsr);
if (mlen > 0) {
unsigned temp = (unsigned)mlen;
skinny_aead_128_384_set_domain(ks, prefix | 1);
lw_xor_block(sum, m, temp);
sum[temp] ^= 0x80;
memset(block, 0, SKINNY_128_BLOCK_SIZE);
skinny_128_384_encrypt(ks, block, block);
lw_xor_block_2_src(c, block, m, temp);
skinny_aead_128_384_update_lfsr(lfsr);
skinny_aead_128_384_set_lfsr(ks, lfsr);
skinny_aead_128_384_set_domain(ks, prefix | 5);
} else {
skinny_aead_128_384_set_domain(ks, prefix | 4);
}
skinny_128_384_encrypt(ks, sum, sum);
}
/**
* \brief Decrypts the ciphertext for a SKINNY-128-384 based AEAD.
*
* \param ks The key schedule to use.
* \param prefix Domain separation prefix for the family member.
* \param sum Authenticated checksum over the plaintext.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the ciphertext buffer.
* \param mlen Number of bytes of ciphertext to be decrypted.
*/
static void skinny_aead_128_384_decrypt
(skinny_128_384_key_schedule_t *ks, unsigned char prefix,
unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m,
const unsigned char *c, unsigned long long mlen)
{
unsigned char block[SKINNY_128_BLOCK_SIZE];
uint64_t lfsr = 1;
memset(sum, 0, SKINNY_128_BLOCK_SIZE);
skinny_aead_128_384_set_domain(ks, prefix | 0);
while (mlen >= SKINNY_128_BLOCK_SIZE) {
skinny_aead_128_384_set_lfsr(ks, lfsr);
skinny_128_384_decrypt(ks, m, c);
lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE);
c += SKINNY_128_BLOCK_SIZE;
m += SKINNY_128_BLOCK_SIZE;
mlen -= SKINNY_128_BLOCK_SIZE;
skinny_aead_128_384_update_lfsr(lfsr);
}
skinny_aead_128_384_set_lfsr(ks, lfsr);
if (mlen > 0) {
unsigned temp = (unsigned)mlen;
skinny_aead_128_384_set_domain(ks, prefix | 1);
memset(block, 0, SKINNY_128_BLOCK_SIZE);
skinny_128_384_encrypt(ks, block, block);
lw_xor_block_2_src(m, block, c, temp);
lw_xor_block(sum, m, temp);
sum[temp] ^= 0x80;
skinny_aead_128_384_update_lfsr(lfsr);
skinny_aead_128_384_set_lfsr(ks, lfsr);
skinny_aead_128_384_set_domain(ks, prefix | 5);
} else {
skinny_aead_128_384_set_domain(ks, prefix | 4);
}
skinny_128_384_encrypt(ks, sum, sum);
}
int skinny_aead_m1_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + SKINNY_AEAD_M1_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE);
/* Encrypt the plaintext to produce the ciphertext */
skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M1, sum, c, m, mlen);
/* Process the associated data */
skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen);
/* Generate the authentication tag */
memcpy(c + mlen, sum, SKINNY_AEAD_M1_TAG_SIZE);
return 0;
}
int skinny_aead_m1_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < SKINNY_AEAD_M1_TAG_SIZE)
return -1;
*mlen = clen - SKINNY_AEAD_M1_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE);
/* Decrypt the ciphertext to produce the plaintext */
skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M1, sum, m, c, *mlen);
/* Process the associated data */
skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen);
/* Check the authentication tag */
return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M1_TAG_SIZE);
}
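/*
* Example (illustrative sketch): encrypting and then decrypting a short
* message with SKINNY-AEAD-M1 through the functions above, with no
* associated data.  The buffer sizes use only the SKINNY_AEAD_* constants
* from skinny-aead.h.
*
* unsigned char key[SKINNY_AEAD_KEY_SIZE] = {0};
* unsigned char nonce[SKINNY_AEAD_M1_NONCE_SIZE] = {0};
* unsigned char msg[4] = {'t', 'e', 's', 't'};
* unsigned char ct[4 + SKINNY_AEAD_M1_TAG_SIZE];
* unsigned char out[4];
* unsigned long long clen, mlen;
* skinny_aead_m1_encrypt(ct, &clen, msg, 4, NULL, 0, NULL, nonce, key);
* if (skinny_aead_m1_decrypt(out, &mlen, NULL, ct, clen, NULL, 0, nonce, key) == 0) {
*     // "out" now holds the original 4-byte message
* }
*/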
int skinny_aead_m2_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + SKINNY_AEAD_M2_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE);
/* Encrypt the plaintext to produce the ciphertext */
skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M2, sum, c, m, mlen);
/* Process the associated data */
skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen);
/* Generate the authentication tag */
memcpy(c + mlen, sum, SKINNY_AEAD_M2_TAG_SIZE);
return 0;
}
int skinny_aead_m2_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < SKINNY_AEAD_M2_TAG_SIZE)
return -1;
*mlen = clen - SKINNY_AEAD_M2_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE);
/* Decrypt the ciphertext to produce the plaintext */
skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M2, sum, m, c, *mlen);
/* Process the associated data */
skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen);
/* Check the authentication tag */
return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M2_TAG_SIZE);
}
int skinny_aead_m3_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + SKINNY_AEAD_M3_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE);
/* Encrypt the plaintext to produce the ciphertext */
skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M3, sum, c, m, mlen);
/* Process the associated data */
skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen);
/* Generate the authentication tag */
memcpy(c + mlen, sum, SKINNY_AEAD_M3_TAG_SIZE);
return 0;
}
int skinny_aead_m3_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < SKINNY_AEAD_M3_TAG_SIZE)
return -1;
*mlen = clen - SKINNY_AEAD_M3_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE);
/* Decrypt the ciphertext to produce the plaintext */
skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M3, sum, m, c, *mlen);
/* Process the associated data */
skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen);
/* Check the authentication tag */
return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M3_TAG_SIZE);
}
int skinny_aead_m4_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + SKINNY_AEAD_M4_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE);
/* Encrypt the plaintext to produce the ciphertext */
skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M4, sum, c, m, mlen);
/* Process the associated data */
skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen);
/* Generate the authentication tag */
memcpy(c + mlen, sum, SKINNY_AEAD_M4_TAG_SIZE);
return 0;
}
int skinny_aead_m4_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_384_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < SKINNY_AEAD_M4_TAG_SIZE)
return -1;
*mlen = clen - SKINNY_AEAD_M4_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE);
/* Decrypt the ciphertext to produce the plaintext */
skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M4, sum, m, c, *mlen);
/* Process the associated data */
skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen);
/* Check the authentication tag */
return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M4_TAG_SIZE);
}
/**
* \brief Initializes the key and nonce for SKINNY-128-256 based AEAD schemes.
*
* \param ks The key schedule to initialize.
* \param key Points to the 16 bytes of the key.
* \param nonce Points to the nonce.
* \param nonce_len Length of the nonce in bytes.
*/
static void skinny_aead_128_256_init
(skinny_128_256_key_schedule_t *ks, const unsigned char *key,
const unsigned char *nonce, unsigned nonce_len)
{
unsigned char k[32];
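/* Tweakey layout as built here: bytes 0..15 (TK1) hold the nonce in the
* upper bytes, with the low bytes zeroed for the per-block LFSR and domain
* byte; bytes 16..31 (TK2) hold the key. */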
memset(k, 0, 16 - nonce_len);
memcpy(k + 16 - nonce_len, nonce, nonce_len);
memcpy(k + 16, key, 16);
skinny_128_256_init(ks, k);
}
/**
* \brief Set the domain separation value in the tweak for SKINNY-128-256.
*
* \param ks Key schedule for the block cipher.
* \param d Domain separation value to write into the tweak.
*/
#define skinny_aead_128_256_set_domain(ks,d) ((ks)->TK1[3] = (d))
/**
* \brief Sets the LFSR field in the tweak for SKINNY-128-256.
*
* \param ks Key schedule for the block cipher.
* \param lfsr 24-bit LFSR value.
*/
#define skinny_aead_128_256_set_lfsr(ks,lfsr) \
do { \
(ks)->TK1[0] = (uint8_t)(lfsr); \
(ks)->TK1[1] = (uint8_t)((lfsr) >> 8); \
(ks)->TK1[2] = (uint8_t)((lfsr) >> 16); \
} while (0)
/**
* \brief Updates the LFSR value for SKINNY-128-256.
*
* \param lfsr 24-bit LFSR value to be updated.
*/
#define skinny_aead_128_256_update_lfsr(lfsr) \
do { \
uint32_t feedback = ((lfsr) & (((uint32_t)1) << 23)) ? 0x1B : 0x00; \
(lfsr) = ((lfsr) << 1) ^ (feedback); \
} while (0)
/**
* \brief Authenticates the associated data for a SKINNY-128-256 based AEAD.
*
* \param ks The key schedule to use.
* \param prefix Domain separation prefix for the family member.
* \param tag Final tag to XOR the authentication checksum into.
* \param ad Points to the associated data.
* \param adlen Length of the associated data in bytes.
*/
static void skinny_aead_128_256_authenticate
(skinny_128_256_key_schedule_t *ks, unsigned char prefix,
unsigned char tag[SKINNY_128_BLOCK_SIZE],
const unsigned char *ad, unsigned long long adlen)
{
unsigned char block[SKINNY_128_BLOCK_SIZE];
uint32_t lfsr = 1;
skinny_aead_128_256_set_domain(ks, prefix | 2);
while (adlen >= SKINNY_128_BLOCK_SIZE) {
skinny_aead_128_256_set_lfsr(ks, lfsr);
skinny_128_256_encrypt(ks, block, ad);
lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE);
ad += SKINNY_128_BLOCK_SIZE;
adlen -= SKINNY_128_BLOCK_SIZE;
skinny_aead_128_256_update_lfsr(lfsr);
}
if (adlen > 0) {
unsigned temp = (unsigned)adlen;
skinny_aead_128_256_set_lfsr(ks, lfsr);
skinny_aead_128_256_set_domain(ks, prefix | 3);
memcpy(block, ad, temp);
block[temp] = 0x80;
memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1);
skinny_128_256_encrypt(ks, block, block);
lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE);
}
}
/**
* \brief Encrypts the plaintext for a SKINNY-128-256 based AEAD.
*
* \param ks The key schedule to use.
* \param prefix Domain separation prefix for the family member.
* \param sum Authenticated checksum over the plaintext.
* \param c Points to the buffer to receive the ciphertext.
* \param m Points to the plaintext buffer.
* \param mlen Number of bytes of plaintext to be encrypted.
*/
static void skinny_aead_128_256_encrypt
(skinny_128_256_key_schedule_t *ks, unsigned char prefix,
unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c,
const unsigned char *m, unsigned long long mlen)
{
unsigned char block[SKINNY_128_BLOCK_SIZE];
uint32_t lfsr = 1;
memset(sum, 0, SKINNY_128_BLOCK_SIZE);
skinny_aead_128_256_set_domain(ks, prefix | 0);
while (mlen >= SKINNY_128_BLOCK_SIZE) {
skinny_aead_128_256_set_lfsr(ks, lfsr);
lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE);
skinny_128_256_encrypt(ks, c, m);
c += SKINNY_128_BLOCK_SIZE;
m += SKINNY_128_BLOCK_SIZE;
mlen -= SKINNY_128_BLOCK_SIZE;
skinny_aead_128_256_update_lfsr(lfsr);
}
skinny_aead_128_256_set_lfsr(ks, lfsr);
if (mlen > 0) {
unsigned temp = (unsigned)mlen;
skinny_aead_128_256_set_domain(ks, prefix | 1);
lw_xor_block(sum, m, temp);
sum[temp] ^= 0x80;
memset(block, 0, SKINNY_128_BLOCK_SIZE);
skinny_128_256_encrypt(ks, block, block);
lw_xor_block_2_src(c, block, m, temp);
skinny_aead_128_256_update_lfsr(lfsr);
skinny_aead_128_256_set_lfsr(ks, lfsr);
skinny_aead_128_256_set_domain(ks, prefix | 5);
} else {
skinny_aead_128_256_set_domain(ks, prefix | 4);
}
skinny_128_256_encrypt(ks, sum, sum);
}
/**
* \brief Decrypts the ciphertext for a SKINNY-128-256 based AEAD.
*
* \param ks The key schedule to use.
* \param prefix Domain separation prefix for the family member.
 * \param sum Authentication checksum computed over the plaintext.
* \param m Points to the buffer to receive the plaintext.
* \param c Points to the ciphertext buffer.
* \param mlen Number of bytes of ciphertext to be decrypted.
*/
static void skinny_aead_128_256_decrypt
(skinny_128_256_key_schedule_t *ks, unsigned char prefix,
unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m,
const unsigned char *c, unsigned long long mlen)
{
unsigned char block[SKINNY_128_BLOCK_SIZE];
uint32_t lfsr = 1;
memset(sum, 0, SKINNY_128_BLOCK_SIZE);
skinny_aead_128_256_set_domain(ks, prefix | 0);
while (mlen >= SKINNY_128_BLOCK_SIZE) {
skinny_aead_128_256_set_lfsr(ks, lfsr);
skinny_128_256_decrypt(ks, m, c);
lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE);
c += SKINNY_128_BLOCK_SIZE;
m += SKINNY_128_BLOCK_SIZE;
mlen -= SKINNY_128_BLOCK_SIZE;
skinny_aead_128_256_update_lfsr(lfsr);
}
skinny_aead_128_256_set_lfsr(ks, lfsr);
if (mlen > 0) {
unsigned temp = (unsigned)mlen;
skinny_aead_128_256_set_domain(ks, prefix | 1);
memset(block, 0, SKINNY_128_BLOCK_SIZE);
skinny_128_256_encrypt(ks, block, block);
lw_xor_block_2_src(m, block, c, temp);
lw_xor_block(sum, m, temp);
sum[temp] ^= 0x80;
skinny_aead_128_256_update_lfsr(lfsr);
skinny_aead_128_256_set_lfsr(ks, lfsr);
skinny_aead_128_256_set_domain(ks, prefix | 5);
} else {
skinny_aead_128_256_set_domain(ks, prefix | 4);
}
skinny_128_256_encrypt(ks, sum, sum);
}
int skinny_aead_m5_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + SKINNY_AEAD_M5_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE);
    /* Encrypt the plaintext to produce the ciphertext */
skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M5, sum, c, m, mlen);
/* Process the associated data */
skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen);
/* Generate the authentication tag */
memcpy(c + mlen, sum, SKINNY_AEAD_M5_TAG_SIZE);
return 0;
}
int skinny_aead_m5_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < SKINNY_AEAD_M5_TAG_SIZE)
return -1;
*mlen = clen - SKINNY_AEAD_M5_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE);
    /* Decrypt the ciphertext to produce the plaintext */
skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M5, sum, m, c, *mlen);
/* Process the associated data */
skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen);
/* Check the authentication tag */
return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M5_TAG_SIZE);
}
int skinny_aead_m6_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Set the length of the returned ciphertext */
*clen = mlen + SKINNY_AEAD_M6_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE);
    /* Encrypt the plaintext to produce the ciphertext */
skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M6, sum, c, m, mlen);
/* Process the associated data */
skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen);
/* Generate the authentication tag */
memcpy(c + mlen, sum, SKINNY_AEAD_M6_TAG_SIZE);
return 0;
}
int skinny_aead_m6_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k)
{
skinny_128_256_key_schedule_t ks;
unsigned char sum[SKINNY_128_BLOCK_SIZE];
(void)nsec;
/* Validate the ciphertext length and set the return "mlen" value */
if (clen < SKINNY_AEAD_M6_TAG_SIZE)
return -1;
*mlen = clen - SKINNY_AEAD_M6_TAG_SIZE;
/* Set up the key schedule with the key and the nonce */
skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE);
    /* Decrypt the ciphertext to produce the plaintext */
skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M6, sum, m, c, *mlen);
/* Process the associated data */
skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen);
/* Check the authentication tag */
return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M6_TAG_SIZE);
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LWCRYPTO_SKINNY_AEAD_H
#define LWCRYPTO_SKINNY_AEAD_H
#include "aead-common.h"
/**
* \file skinny-aead.h
* \brief Authenticated encryption based on the SKINNY block cipher.
*
* SKINNY-AEAD is a family of authenticated encryption algorithms
* that are built around the SKINNY tweakable block cipher. There
* are six members in the family:
*
* \li SKINNY-AEAD-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher. This is the
* primary member of the family.
* \li SKINNY-AEAD-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-384 tweakable block cipher.
* \li SKINNY-AEAD-M3 has a 128-bit key, a 128-bit nonce, and a 64-bit tag,
* based around the SKINNY-128-384 tweakable block cipher.
* \li SKINNY-AEAD-M4 has a 128-bit key, a 96-bit nonce, and a 64-bit tag,
* based around the SKINNY-128-384 tweakable block cipher.
* \li SKINNY-AEAD-M5 has a 128-bit key, a 96-bit nonce, and a 128-bit tag,
* based around the SKINNY-128-256 tweakable block cipher.
* \li SKINNY-AEAD-M6 has a 128-bit key, a 96-bit nonce, and a 64-bit tag,
* based around the SKINNY-128-256 tweakable block cipher.
*
* The SKINNY-AEAD family also includes two hash algorithms:
*
* \li SKINNY-tk3-HASH with a 256-bit hash output, based around the
* SKINNY-128-384 tweakable block cipher. This is the primary hashing
* member of the family.
* \li SKINNY-tk2-HASH with a 256-bit hash output, based around the
* SKINNY-128-256 tweakable block cipher.
*
* References: https://sites.google.com/site/skinnycipher/home
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief Size of the key for all SKINNY-AEAD family members.
*/
#define SKINNY_AEAD_KEY_SIZE 16
/**
* \brief Size of the authentication tag for SKINNY-AEAD-M1.
*/
#define SKINNY_AEAD_M1_TAG_SIZE 16
/**
* \brief Size of the nonce for SKINNY-AEAD-M1.
*/
#define SKINNY_AEAD_M1_NONCE_SIZE 16
/**
* \brief Size of the authentication tag for SKINNY-AEAD-M2.
*/
#define SKINNY_AEAD_M2_TAG_SIZE 16
/**
* \brief Size of the nonce for SKINNY-AEAD-M2.
*/
#define SKINNY_AEAD_M2_NONCE_SIZE 12
/**
* \brief Size of the authentication tag for SKINNY-AEAD-M3.
*/
#define SKINNY_AEAD_M3_TAG_SIZE 8
/**
* \brief Size of the nonce for SKINNY-AEAD-M3.
*/
#define SKINNY_AEAD_M3_NONCE_SIZE 16
/**
* \brief Size of the authentication tag for SKINNY-AEAD-M4.
*/
#define SKINNY_AEAD_M4_TAG_SIZE 8
/**
* \brief Size of the nonce for SKINNY-AEAD-M4.
*/
#define SKINNY_AEAD_M4_NONCE_SIZE 12
/**
* \brief Size of the authentication tag for SKINNY-AEAD-M5.
*/
#define SKINNY_AEAD_M5_TAG_SIZE 16
/**
* \brief Size of the nonce for SKINNY-AEAD-M5.
*/
#define SKINNY_AEAD_M5_NONCE_SIZE 12
/**
* \brief Size of the authentication tag for SKINNY-AEAD-M6.
*/
#define SKINNY_AEAD_M6_TAG_SIZE 8
/**
* \brief Size of the nonce for SKINNY-AEAD-M6.
*/
#define SKINNY_AEAD_M6_NONCE_SIZE 12
/**
* \brief Meta-information block for the SKINNY-AEAD-M1 cipher.
*/
extern aead_cipher_t const skinny_aead_m1_cipher;
/**
* \brief Meta-information block for the SKINNY-AEAD-M2 cipher.
*/
extern aead_cipher_t const skinny_aead_m2_cipher;
/**
* \brief Meta-information block for the SKINNY-AEAD-M3 cipher.
*/
extern aead_cipher_t const skinny_aead_m3_cipher;
/**
* \brief Meta-information block for the SKINNY-AEAD-M4 cipher.
*/
extern aead_cipher_t const skinny_aead_m4_cipher;
/**
* \brief Meta-information block for the SKINNY-AEAD-M5 cipher.
*/
extern aead_cipher_t const skinny_aead_m5_cipher;
/**
* \brief Meta-information block for the SKINNY-AEAD-M6 cipher.
*/
extern aead_cipher_t const skinny_aead_m6_cipher;
/**
* \brief Encrypts and authenticates a packet with SKINNY-AEAD-M1.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa skinny_aead_m1_decrypt()
*/
int skinny_aead_m1_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with SKINNY-AEAD-M1.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa skinny_aead_m1_encrypt()
*/
int skinny_aead_m1_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
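/**
 * \brief Example round trip with SKINNY-AEAD-M1 (a minimal sketch).
 *
 * The key, nonce, and message values below are placeholder assumptions
 * for illustration only; in real use the key must be kept secret and the
 * nonce must be unique for every packet encrypted under that key.
 *
 * \code
 * unsigned char key[SKINNY_AEAD_KEY_SIZE] = {0};        // placeholder key
 * unsigned char nonce[SKINNY_AEAD_M1_NONCE_SIZE] = {0}; // placeholder nonce
 * unsigned char msg[16] = "packet payload";
 * unsigned char ct[sizeof(msg) + SKINNY_AEAD_M1_TAG_SIZE];
 * unsigned char pt[sizeof(msg)];
 * unsigned long long ctlen, ptlen;
 *
 * skinny_aead_m1_encrypt(ct, &ctlen, msg, sizeof(msg), 0, 0, 0, nonce, key);
 * if (skinny_aead_m1_decrypt(pt, &ptlen, 0, ct, ctlen, 0, 0, nonce, key) != 0) {
 *     // tag check failed: the plaintext buffer has been cleared; do not use it
 * }
 * \endcode
 */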
/**
* \brief Encrypts and authenticates a packet with SKINNY-AEAD-M2.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa skinny_aead_m2_decrypt()
*/
int skinny_aead_m2_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with SKINNY-AEAD-M2.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa skinny_aead_m2_encrypt()
*/
int skinny_aead_m2_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with SKINNY-AEAD-M3.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 8 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa skinny_aead_m3_decrypt()
*/
int skinny_aead_m3_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with SKINNY-AEAD-M3.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 8 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 16 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa skinny_aead_m3_encrypt()
*/
int skinny_aead_m3_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with SKINNY-AEAD-M4.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 8 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa skinny_aead_m4_decrypt()
*/
int skinny_aead_m4_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with SKINNY-AEAD-M4.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 8 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa skinny_aead_m4_encrypt()
*/
int skinny_aead_m4_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with SKINNY-AEAD-M5.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 16 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa skinny_aead_m5_decrypt()
*/
int skinny_aead_m5_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with SKINNY-AEAD-M5.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 16 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa skinny_aead_m5_encrypt()
*/
int skinny_aead_m5_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Encrypts and authenticates a packet with SKINNY-AEAD-M6.
*
* \param c Buffer to receive the output.
* \param clen On exit, set to the length of the output which includes
* the ciphertext and the 8 byte authentication tag.
* \param m Buffer that contains the plaintext message to encrypt.
* \param mlen Length of the plaintext message in bytes.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param nsec Secret nonce - not used by this algorithm.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to encrypt the packet.
*
* \return 0 on success, or a negative value if there was an error in
* the parameters.
*
* \sa skinny_aead_m6_decrypt()
*/
int skinny_aead_m6_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k);
/**
* \brief Decrypts and authenticates a packet with SKINNY-AEAD-M6.
*
* \param m Buffer to receive the plaintext message on output.
* \param mlen Receives the length of the plaintext message on output.
* \param nsec Secret nonce - not used by this algorithm.
* \param c Buffer that contains the ciphertext and authentication
* tag to decrypt.
* \param clen Length of the input data in bytes, which includes the
* ciphertext and the 8 byte authentication tag.
* \param ad Buffer that contains associated data to authenticate
* along with the packet but which does not need to be encrypted.
* \param adlen Length of the associated data in bytes.
* \param npub Points to the public nonce for the packet which must
* be 12 bytes in length.
* \param k Points to the 16 bytes of the key to use to decrypt the packet.
*
* \return 0 on success, -1 if the authentication tag was incorrect,
* or some other negative number if there was an error in the parameters.
*
* \sa skinny_aead_m6_encrypt()
*/
int skinny_aead_m6_decrypt
(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k);
#ifdef __cplusplus
}
#endif
#endif
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "skinny-hash.h"
#include "internal-skinny128.h"
#include "internal-util.h"
#include <string.h>
aead_hash_algorithm_t const skinny_tk3_hash_algorithm = {
"SKINNY-tk3-HASH",
sizeof(int),
SKINNY_HASH_SIZE,
AEAD_FLAG_NONE,
skinny_tk3_hash,
(aead_hash_init_t)0,
(aead_hash_update_t)0,
(aead_hash_finalize_t)0,
(aead_xof_absorb_t)0,
(aead_xof_squeeze_t)0
};
aead_hash_algorithm_t const skinny_tk2_hash_algorithm = {
"SKINNY-tk2-HASH",
sizeof(int),
SKINNY_HASH_SIZE,
AEAD_FLAG_NONE,
skinny_tk2_hash,
(aead_hash_init_t)0,
(aead_hash_update_t)0,
(aead_hash_finalize_t)0,
(aead_xof_absorb_t)0,
(aead_xof_squeeze_t)0
};
/**
* \brief Size of the permutation state for SKINNY-tk3-HASH.
*/
#define SKINNY_TK3_STATE_SIZE 48
/**
* \brief Size of the permutation state for SKINNY-tk2-HASH.
*/
#define SKINNY_TK2_STATE_SIZE 32
/**
* \brief Rate of absorbing data for SKINNY-tk3-HASH.
*/
#define SKINNY_TK3_HASH_RATE 16
/**
* \brief Rate of absorbing data for SKINNY-tk2-HASH.
*/
#define SKINNY_TK2_HASH_RATE 4
/**
* \brief Input block that is encrypted with the state for each
* block permutation of SKINNY-tk3-HASH or SKINNY-tk2-HASH.
*/
static unsigned char const skinny_hash_block[48] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/**
* \brief Permutes the internal state for SKINNY-tk3-HASH.
*
* \param state The state to be permuted.
*/
static void skinny_tk3_permute(unsigned char state[SKINNY_TK3_STATE_SIZE])
{
unsigned char temp[SKINNY_TK3_STATE_SIZE];
skinny_128_384_encrypt_tk_full(state, temp, skinny_hash_block);
skinny_128_384_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16);
skinny_128_384_encrypt_tk_full(state, temp + 32, skinny_hash_block + 32);
memcpy(state, temp, SKINNY_TK3_STATE_SIZE);
}
/**
* \brief Permutes the internal state for SKINNY-tk2-HASH.
*
* \param state The state to be permuted.
*/
static void skinny_tk2_permute(unsigned char state[SKINNY_TK2_STATE_SIZE])
{
unsigned char temp[SKINNY_TK2_STATE_SIZE];
skinny_128_256_encrypt_tk_full(state, temp, skinny_hash_block);
skinny_128_256_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16);
memcpy(state, temp, SKINNY_TK2_STATE_SIZE);
}
int skinny_tk3_hash
(unsigned char *out, const unsigned char *in, unsigned long long inlen)
{
unsigned char state[SKINNY_TK3_STATE_SIZE];
unsigned temp;
/* Initialize the hash state */
memset(state, 0, sizeof(state));
state[SKINNY_TK3_HASH_RATE] = 0x80;
/* Process as many full blocks as possible */
while (inlen >= SKINNY_TK3_HASH_RATE) {
lw_xor_block(state, in, SKINNY_TK3_HASH_RATE);
skinny_tk3_permute(state);
in += SKINNY_TK3_HASH_RATE;
inlen -= SKINNY_TK3_HASH_RATE;
}
/* Pad and process the last block */
temp = (unsigned)inlen;
lw_xor_block(state, in, temp);
state[temp] ^= 0x80; /* padding */
skinny_tk3_permute(state);
/* Generate the hash output */
memcpy(out, state, 16);
skinny_tk3_permute(state);
memcpy(out + 16, state, 16);
return 0;
}
int skinny_tk2_hash
(unsigned char *out, const unsigned char *in, unsigned long long inlen)
{
unsigned char state[SKINNY_TK2_STATE_SIZE];
unsigned temp;
/* Initialize the hash state */
memset(state, 0, sizeof(state));
state[SKINNY_TK2_HASH_RATE] = 0x80;
/* Process as many full blocks as possible */
while (inlen >= SKINNY_TK2_HASH_RATE) {
lw_xor_block(state, in, SKINNY_TK2_HASH_RATE);
skinny_tk2_permute(state);
in += SKINNY_TK2_HASH_RATE;
inlen -= SKINNY_TK2_HASH_RATE;
}
/* Pad and process the last block */
temp = (unsigned)inlen;
lw_xor_block(state, in, temp);
state[temp] ^= 0x80; /* padding */
skinny_tk2_permute(state);
/* Generate the hash output */
memcpy(out, state, 16);
skinny_tk2_permute(state);
memcpy(out + 16, state, 16);
return 0;
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LWCRYPTO_SKINNY_HASH_H
#define LWCRYPTO_SKINNY_HASH_H
#include "aead-common.h"
/**
* \file skinny-hash.h
* \brief Hash algorithms based on the SKINNY block cipher.
*
* The SKINNY-AEAD family includes two hash algorithms:
*
* \li SKINNY-tk3-HASH with a 256-bit hash output, based around the
* SKINNY-128-384 tweakable block cipher. This is the primary hashing
* member of the family.
* \li SKINNY-tk2-HASH with a 256-bit hash output, based around the
* SKINNY-128-256 tweakable block cipher.
*
* References: https://sites.google.com/site/skinnycipher/home
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief Size of the hash output for SKINNY-tk3-HASH and SKINNY-tk2-HASH.
*/
#define SKINNY_HASH_SIZE 32
/**
* \brief Meta-information block for the SKINNY-tk3-HASH algorithm.
*/
extern aead_hash_algorithm_t const skinny_tk3_hash_algorithm;
/**
* \brief Meta-information block for the SKINNY-tk2-HASH algorithm.
*/
extern aead_hash_algorithm_t const skinny_tk2_hash_algorithm;
/**
* \brief Hashes a block of input data with SKINNY-tk3-HASH to
* generate a hash value.
*
* \param out Buffer to receive the hash output which must be at least
* SKINNY_HASH_SIZE bytes in length.
* \param in Points to the input data to be hashed.
* \param inlen Length of the input data in bytes.
*
* \return Returns zero on success or -1 if there was an error in the
* parameters.
*/
int skinny_tk3_hash
(unsigned char *out, const unsigned char *in, unsigned long long inlen);
/**
* \brief Hashes a block of input data with SKINNY-tk2-HASH to
* generate a hash value.
*
* \param out Buffer to receive the hash output which must be at least
* SKINNY_HASH_SIZE bytes in length.
* \param in Points to the input data to be hashed.
* \param inlen Length of the input data in bytes.
*
* \return Returns zero on success or -1 if there was an error in the
* parameters.
*/
int skinny_tk2_hash
(unsigned char *out, const unsigned char *in, unsigned long long inlen);
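/**
 * \brief Example of hashing a message with SKINNY-tk3-HASH (a minimal sketch).
 *
 * The message below is a placeholder assumption for illustration only;
 * SKINNY-tk2-HASH is used the same way via skinny_tk2_hash().
 *
 * \code
 * unsigned char digest[SKINNY_HASH_SIZE];
 * static const unsigned char msg[] = "abc";
 *
 * skinny_tk3_hash(digest, msg, sizeof(msg) - 1); // 256-bit digest of "abc"
 * \endcode
 */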
#ifdef __cplusplus
}
#endif
#endif